1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "gc/shared/barrierSet.hpp"
  28 #include "gc/shared/barrierSetAssembler.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "nativeInst_sparc.hpp"
  31 #include "oops/instanceOop.hpp"
  32 #include "oops/method.hpp"
  33 #include "oops/objArrayKlass.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "prims/methodHandles.hpp"
  36 #include "runtime/frame.inline.hpp"
  37 #include "runtime/handles.inline.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubCodeGenerator.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 #ifdef COMPILER2
  43 #include "opto/runtime.hpp"
  44 #endif
  45 
  46 // Declaration and definition of StubGenerator (no .hpp file).
  47 // For a more detailed description of the stub routine structure
  48 // see the comment in stubRoutines.hpp.
  49 
  50 #define __ _masm->
  51 
  52 #ifdef PRODUCT
  53 #define BLOCK_COMMENT(str) /* nothing */
  54 #else
  55 #define BLOCK_COMMENT(str) __ block_comment(str)
  56 #endif
  57 
  58 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  59 
  60 // Note:  The register L7 is used as L7_thread_cache, and may not be used
  61 //        any other way within this module.
  62 
  63 static const Register& Lstub_temp = L2;
  64 
  65 // -------------------------------------------------------------------------------------------------------------------------
  66 // Stub Code definitions
  67 
  68 class StubGenerator: public StubCodeGenerator {
  69  private:
  70 
  71 #ifdef PRODUCT
  72 #define inc_counter_np(a,b,c)
  73 #else
  74 #define inc_counter_np(counter, t1, t2) \
  75   BLOCK_COMMENT("inc_counter " #counter); \
  76   __ inc_counter(&counter, t1, t2);
  77 #endif
  78 
  79   //----------------------------------------------------------------------------------------------------
  80   // Call stubs are used to call Java from C
  81 
  82   address generate_call_stub(address& return_pc) {
  83     StubCodeMark mark(this, "StubRoutines", "call_stub");
  84     address start = __ pc();
  85 
  86     // Incoming arguments:
  87     //
  88     // o0         : call wrapper address
  89     // o1         : result (address)
  90     // o2         : result type
  91     // o3         : method
  92     // o4         : (interpreter) entry point
  93     // o5         : parameters (address)
  94     // [sp + 0x5c]: parameter size (in words)
  95     // [sp + 0x60]: thread
  96     //
  97     // +---------------+ <--- sp + 0
  98     // |               |
  99     // . reg save area .
 100     // |               |
 101     // +---------------+ <--- sp + 0x40
 102     // |               |
 103     // . extra 7 slots .
 104     // |               |
 105     // +---------------+ <--- sp + 0x5c
 106     // |  param. size  |
 107     // +---------------+ <--- sp + 0x60
 108     // |    thread     |
 109     // +---------------+
 110     // |               |
 111 
 112     // note: if the link argument position changes, adjust
 113     //       the code in frame::entry_frame_call_wrapper()
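         //
         // For reference, callers reach this stub through the CallStub function
         // pointer type declared in stubRoutines.hpp, which is roughly:
         //
         //   typedef void (*CallStub)(address   link,
         //                            intptr_t* result,
         //                            BasicType result_type,
         //                            Method*   method,
         //                            address   entry_point,
         //                            intptr_t* parameters,
         //                            int       size_of_parameters,
         //                            TRAPS);
         //
         // which is why the first six arguments arrive in o0..o5 and the last
         // two are passed on the stack as shown above.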
 114 
 115     const Argument link           = Argument(0, false); // used only for GC
 116     const Argument result         = Argument(1, false);
 117     const Argument result_type    = Argument(2, false);
 118     const Argument method         = Argument(3, false);
 119     const Argument entry_point    = Argument(4, false);
 120     const Argument parameters     = Argument(5, false);
 121     const Argument parameter_size = Argument(6, false);
 122     const Argument thread         = Argument(7, false);
 123 
 124     // setup thread register
 125     __ ld_ptr(thread.as_address(), G2_thread);
 126     __ reinit_heapbase();
 127 
 128 #ifdef ASSERT
 129     // make sure we have no pending exceptions
 130     { const Register t = G3_scratch;
 131       Label L;
 132       __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
 133       __ br_null_short(t, Assembler::pt, L);
 134       __ stop("StubRoutines::call_stub: entered with pending exception");
 135       __ bind(L);
 136     }
 137 #endif
 138 
 139     // create activation frame & allocate space for parameters
 140     { const Register t = G3_scratch;
 141       __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
 142       __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
 143       __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
 144       __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
 145       __ neg(t);                                                // negate so it can be used with save
 146       __ save(SP, t, SP);                                       // setup new frame
 147     }
 148 
 149     // +---------------+ <--- sp + 0
 150     // |               |
 151     // . reg save area .
 152     // |               |
 153     // +---------------+ <--- sp + 0x40
 154     // |               |
 155     // . extra 7 slots .
 156     // |               |
 157     // +---------------+ <--- sp + 0x5c
 158     // |  empty slot   |      (only if parameter size is even)
 159     // +---------------+
 160     // |               |
 161     // .  parameters   .
 162     // |               |
 163     // +---------------+ <--- fp + 0
 164     // |               |
 165     // . reg save area .
 166     // |               |
 167     // +---------------+ <--- fp + 0x40
 168     // |               |
 169     // . extra 7 slots .
 170     // |               |
 171     // +---------------+ <--- fp + 0x5c
 172     // |  param. size  |
 173     // +---------------+ <--- fp + 0x60
 174     // |    thread     |
 175     // +---------------+
 176     // |               |
 177 
 178     // pass parameters if any
 179     BLOCK_COMMENT("pass parameters if any");
 180     { const Register src = parameters.as_in().as_register();
 181       const Register dst = Lentry_args;
 182       const Register tmp = G3_scratch;
 183       const Register cnt = G4_scratch;
 184 
 185       // test if any parameters & setup of Lentry_args
 186       Label exit;
 187       __ ld_ptr(parameter_size.as_in().as_address(), cnt);      // parameter counter
 188       __ add( FP, STACK_BIAS, dst );
 189       __ cmp_zero_and_br(Assembler::zero, cnt, exit);
 190       __ delayed()->sub(dst, BytesPerWord, dst);                 // setup Lentry_args
 191 
 192       // copy parameters if any
 193       Label loop;
 194       __ BIND(loop);
 195       // Store parameter value
 196       __ ld_ptr(src, 0, tmp);
 197       __ add(src, BytesPerWord, src);
 198       __ st_ptr(tmp, dst, 0);
 199       __ deccc(cnt);
 200       __ br(Assembler::greater, false, Assembler::pt, loop);
 201       __ delayed()->sub(dst, Interpreter::stackElementSize, dst);
 202 
 203       // done
 204       __ BIND(exit);
 205     }
 206 
 207     // setup parameters, method & call Java function
 208 #ifdef ASSERT
 209     // layout_activation_impl checks its notion of saved SP against
 210     // this register, so if this changes, update it as well.
 211     const Register saved_SP = Lscratch;
 212     __ mov(SP, saved_SP);                               // keep track of SP before call
 213 #endif
 214 
 215     // setup parameters
 216     const Register t = G3_scratch;
 217     __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
 218     __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
 219     __ sub(FP, t, Gargs);                              // setup parameter pointer
 220     __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
 221     __ mov(SP, O5_savedSP);
 222 
 223 
 224     // do the call
 225     //
 226     // the following registers must be set up:
 227     //
 228     // G2_thread
 229     // G5_method
 230     // Gargs
 231     BLOCK_COMMENT("call Java function");
 232     __ jmpl(entry_point.as_in().as_register(), G0, O7);
 233     __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method
 234 
 235     BLOCK_COMMENT("call_stub_return_address:");
 236     return_pc = __ pc();
 237 
 238     // The callee, if it wasn't interpreted, can return with SP changed so
 239     // we can no longer assert that SP is unchanged across the call.
 240 
 241     // store result depending on type
 242     // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
 243     //  is treated as T_INT)
 244     { const Register addr = result     .as_in().as_register();
 245       const Register type = result_type.as_in().as_register();
 246       Label is_long, is_float, is_double, is_object, exit;
 247       __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
 248       __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
 249       __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
 250       __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
 251       __ delayed()->nop();
 252 
 253       // store int result
 254       __ st(O0, addr, G0);
 255 
 256       __ BIND(exit);
 257       __ ret();
 258       __ delayed()->restore();
 259 
 260       __ BIND(is_object);
 261       __ ba(exit);
 262       __ delayed()->st_ptr(O0, addr, G0);
 263 
 264       __ BIND(is_float);
 265       __ ba(exit);
 266       __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
 267 
 268       __ BIND(is_double);
 269       __ ba(exit);
 270       __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
 271 
 272       __ BIND(is_long);
 273       __ ba(exit);
 274       __ delayed()->st_long(O0, addr, G0);      // store entire long
 275      }
 276      return start;
 277   }
 278 
 279 
 280   //----------------------------------------------------------------------------------------------------
 281   // Return point for a Java call if there's an exception thrown in Java code.
 282   // The exception is caught and transformed into a pending exception stored in
 283   // JavaThread that can be tested from within the VM.
 284   //
 285   // Oexception: exception oop
 286 
 287   address generate_catch_exception() {
 288     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 289 
 290     address start = __ pc();
 291     // verify that thread corresponds
 292     __ verify_thread();
 293 
 294     const Register& temp_reg = Gtemp;
 295     Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
 296     Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
 297     Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());
 298 
 299     // set pending exception
 300     __ verify_oop(Oexception);
 301     __ st_ptr(Oexception, pending_exception_addr);
 302     __ set((intptr_t)__FILE__, temp_reg);
 303     __ st_ptr(temp_reg, exception_file_offset_addr);
 304     __ set((intptr_t)__LINE__, temp_reg);
 305     __ st(temp_reg, exception_line_offset_addr);
 306 
 307     // complete return to VM
 308     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
 309 
 310     AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
 311     __ jump_to(stub_ret, temp_reg);
 312     __ delayed()->nop();
 313 
 314     return start;
 315   }
 316 
 317 
 318   //----------------------------------------------------------------------------------------------------
 319   // Continuation point for runtime calls returning with a pending exception
 320   // The pending exception check happened in the runtime or native call stub
 321   // The pending exception in Thread is converted into a Java-level exception
 322   //
 323   // Contract with Java-level exception handler: O0 = exception
 324   //                                             O1 = throwing pc
 325 
 326   address generate_forward_exception() {
 327     StubCodeMark mark(this, "StubRoutines", "forward_exception");
 328     address start = __ pc();
 329 
 330     // Upon entry, O7 has the return address returning into Java
 331     // (interpreted or compiled) code; i.e. the return address
 332     // becomes the throwing pc.
 333 
 334     const Register& handler_reg = Gtemp;
 335 
 336     Address exception_addr(G2_thread, Thread::pending_exception_offset());
 337 
 338 #ifdef ASSERT
 339     // make sure that this code is only executed if there is a pending exception
 340     { Label L;
 341       __ ld_ptr(exception_addr, Gtemp);
 342       __ br_notnull_short(Gtemp, Assembler::pt, L);
 343       __ stop("StubRoutines::forward exception: no pending exception (1)");
 344       __ bind(L);
 345     }
 346 #endif
 347 
 348     // compute exception handler into handler_reg
 349     __ get_thread();
 350     __ ld_ptr(exception_addr, Oexception);
 351     __ verify_oop(Oexception);
 352     __ save_frame(0);             // compensates for compiler weakness
 353     __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
 354     BLOCK_COMMENT("call exception_handler_for_return_address");
 355     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
 356     __ mov(O0, handler_reg);
 357     __ restore();                 // compensates for compiler weakness
 358 
 359     __ ld_ptr(exception_addr, Oexception);
 360     __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC
 361 
 362 #ifdef ASSERT
 363     // make sure exception is set
 364     { Label L;
 365       __ br_notnull_short(Oexception, Assembler::pt, L);
 366       __ stop("StubRoutines::forward exception: no pending exception (2)");
 367       __ bind(L);
 368     }
 369 #endif
 370     // jump to exception handler
 371     __ jmp(handler_reg, 0);
 372     // clear pending exception
 373     __ delayed()->st_ptr(G0, exception_addr);
 374 
 375     return start;
 376   }
 377 
 378   // Safefetch stubs.
 379   void generate_safefetch(const char* name, int size, address* entry,
 380                           address* fault_pc, address* continuation_pc) {
 381     // safefetch signatures:
 382     //   int      SafeFetch32(int*      adr, int      errValue);
 383     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
 384     //
 385     // arguments:
 386     //   o0 = adr
 387     //   o1 = errValue
 388     //
 389     // result:
 390     //   o0  = *adr or errValue
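         //
         // If the load at *fault_pc faults, the platform signal handler is expected
         // to recognize the pc as a SafeFetch fault and resume execution at
         // *continuation_pc; since errValue was already moved into O0 before the
         // load (and a faulting load leaves O0 untouched), the caller then simply
         // receives errValue.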
 391 
 392     StubCodeMark mark(this, "StubRoutines", name);
 393 
 394     // Entry point, pc or function descriptor.
 395     __ align(CodeEntryAlignment);
 396     *entry = __ pc();
 397 
 398     __ mov(O0, G1);  // g1 = o0
 399     __ mov(O1, O0);  // o0 = o1
 400     // Load *adr into O0, may fault.
 401     *fault_pc = __ pc();
 402     switch (size) {
 403       case 4:
 404         // int32_t
 405         __ ldsw(G1, 0, O0);  // o0 = [g1]
 406         break;
 407       case 8:
 408         // int64_t
 409         __ ldx(G1, 0, O0);   // o0 = [g1]
 410         break;
 411       default:
 412         ShouldNotReachHere();
 413     }
 414 
 415     // return errValue or *adr
 416     *continuation_pc = __ pc();
 417     // By convention with the trap handler we ensure there is a non-CTI
 418     // instruction in the trap shadow.
 419     __ nop();
 420     __ retl();
 421     __ delayed()->nop();
 422   }
 423 
 424   //------------------------------------------------------------------------------------------------------------------------
 425   // Continuation point for throwing of implicit exceptions that are not handled in
 426   // the current activation. Fabricates an exception oop and initiates normal
 427   // exception dispatching in this frame. Only callee-saved registers are preserved
 428   // (through the normal register window / RegisterMap handling).
 429   // If the compiler needs all registers to be preserved between the fault
 430   // point and the exception handler then it must assume responsibility for that in
 431   // AbstractCompiler::continuation_for_implicit_null_exception or
 432   // continuation_for_implicit_division_by_zero_exception. All other implicit
 433   // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
 434   // either at call sites or otherwise assume that stack unwinding will be initiated,
 435   // so caller saved registers were assumed volatile in the compiler.
 436 
 437   // Note that we generate only this stub into a RuntimeStub, because it needs to be
 438   // properly traversed and ignored during GC, so we change the meaning of the "__"
 439   // macro within this method.
 440 #undef __
 441 #define __ masm->
 442 
 443   address generate_throw_exception(const char* name, address runtime_entry,
 444                                    Register arg1 = noreg, Register arg2 = noreg) {
 445 #ifdef ASSERT
 446     int insts_size = VerifyThread ? 1 * K : 600;
 447 #else
 448     int insts_size = VerifyThread ? 1 * K : 256;
 449 #endif /* ASSERT */
 450     int locs_size  = 32;
 451 
 452     CodeBuffer      code(name, insts_size, locs_size);
 453     MacroAssembler* masm = new MacroAssembler(&code);
 454 
 455     __ verify_thread();
 456 
 457     // This is an inlined and slightly modified version of call_VM
 458     // which has the ability to fetch the return PC out of thread-local storage
 459     __ assert_not_delayed();
 460 
 461     // Note that we always push a frame because on the SPARC
 462     // architecture, for all of our implicit exception kinds at call
 463     // sites, the implicit exception is taken before the callee frame
 464     // is pushed.
 465     __ save_frame(0);
 466 
 467     int frame_complete = __ offset();
 468 
 469     // Note that we always have a runtime stub frame on the top of stack by this point
 470     Register last_java_sp = SP;
 471     // 64-bit last_java_sp is biased!
 472     __ set_last_Java_frame(last_java_sp, G0);
 473     if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
 474     __ save_thread(noreg);
 475     if (arg1 != noreg) {
 476       assert(arg2 != O1, "clobbered");
 477       __ mov(arg1, O1);
 478     }
 479     if (arg2 != noreg) {
 480       __ mov(arg2, O2);
 481     }
 482     // do the call
 483     BLOCK_COMMENT("call runtime_entry");
 484     __ call(runtime_entry, relocInfo::runtime_call_type);
 485     if (!VerifyThread)
 486       __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
 487     else
 488       __ delayed()->nop();             // (thread already passed)
 489     __ restore_thread(noreg);
 490     __ reset_last_Java_frame();
 491 
 492     // check for pending exceptions. use Gtemp as scratch register.
 493 #ifdef ASSERT
 494     Label L;
 495 
 496     Address exception_addr(G2_thread, Thread::pending_exception_offset());
 497     Register scratch_reg = Gtemp;
 498     __ ld_ptr(exception_addr, scratch_reg);
 499     __ br_notnull_short(scratch_reg, Assembler::pt, L);
 500     __ should_not_reach_here();
 501     __ bind(L);
 502 #endif // ASSERT
 503     BLOCK_COMMENT("call forward_exception_entry");
 504     __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
 505     // we use O7 linkage so that forward_exception_entry has the issuing PC
 506     __ delayed()->restore();
 507 
 508     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
 509     return stub->entry_point();
 510   }
 511 
 512 #undef __
 513 #define __ _masm->
 514 
 515 
 516   // Generate a routine that sets all the registers so we
 517   // can tell if the stop routine prints them correctly.
 518   address generate_test_stop() {
 519     StubCodeMark mark(this, "StubRoutines", "test_stop");
 520     address start = __ pc();
 521 
 522     int i;
 523 
 524     __ save_frame(0);
 525 
 526     static jfloat zero = 0.0, one = 1.0;
 527 
 528     // put addr in L0, then load through L0 to F0
 529     __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
 530     __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1
 531 
 532     // use add to put 2..18 in F2..F18
 533     for ( i = 2;  i <= 18;  ++i ) {
 534       __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1),  as_FloatRegister(i));
 535     }
 536 
 537     // Now put double 2 in F16, double 18 in F18
 538     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
 539     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );
 540 
 541     // use add to put 20..30 in F20..F30
 542     for (i = 20; i < 32; i += 2) {
 543       __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2),  as_FloatRegister(i));
 544     }
 545 
 546     // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
 547     for ( i = 0; i < 8; ++i ) {
 548       if (i < 6) {
 549         __ set(     i, as_iRegister(i));
 550         __ set(16 + i, as_oRegister(i));
 551         __ set(24 + i, as_gRegister(i));
 552       }
 553       __ set( 8 + i, as_lRegister(i));
 554     }
 555 
 556     __ stop("testing stop");
 557 
 558 
 559     __ ret();
 560     __ delayed()->restore();
 561 
 562     return start;
 563   }
 564 
 565 
 566   address generate_stop_subroutine() {
 567     StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
 568     address start = __ pc();
 569 
 570     __ stop_subroutine();
 571 
 572     return start;
 573   }
 574 
 575   address generate_flush_callers_register_windows() {
 576     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
 577     address start = __ pc();
 578 
 579     __ flushw();
 580     __ retl(false);
 581     __ delayed()->add( FP, STACK_BIAS, O0 );
 582     // The returned value must be a stack pointer whose register save area
 583     // is flushed, and will stay flushed while the caller executes.
 584 
 585     return start;
 586   }
 587 
 588   // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
 589   // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
 590   //
 591   // Arguments:
 592   //
 593   //      exchange_value: O0
 594   //      dest:           O1
 595   //
 596   // Results:
 597   //
 598   //     O0: the value previously stored in dest
 599   //
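       // Rough C-style sketch of the CAS-based path below (illustrative only,
       // assuming a cas() helper with compare-and-swap semantics):
       //
       //   jint atomic_xchg(jint exchange_value, volatile jint* dest) {
       //     jint old;
       //     do { old = *dest; } while (cas(dest, old, exchange_value) != old);
       //     return old;
       //   }
       //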
 600   address generate_atomic_xchg() {
 601     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 602     address start = __ pc();
 603 
 604     if (UseCASForSwap) {
 605       // Use CAS instead of swap, just in case the MP hardware
 606       // prefers to work with just one kind of synch. instruction.
 607       Label retry;
 608       __ BIND(retry);
 609       __ mov(O0, O3);       // scratch copy of exchange value
 610       __ ld(O1, 0, O2);     // observe the previous value
 611       // try to replace O2 with O3
 612       __ cas(O1, O2, O3);
 613       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 614 
 615       __ retl(false);
 616       __ delayed()->mov(O2, O0);  // report previous value to caller
 617     } else {
 618       __ retl(false);
 619       __ delayed()->swap(O1, 0, O0);
 620     }
 621 
 622     return start;
 623   }
 624 
 625 
 626   // Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
 627   // used by Atomic::cmpxchg(volatile jint* dest, jint compare_value, jint exchange_value)
 628   //
 629   // Arguments:
 630   //
 631   //      exchange_value: O0
 632   //      dest:           O1
 633   //      compare_value:  O2
 634   //
 635   // Results:
 636   //
 637   //     O0: the value previously stored in dest
 638   //
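       // The single cas instruction below compares *dest with compare_value and,
       // if they are equal, stores exchange_value; either way the old value of
       // *dest is left in O0.  Rough C-style sketch (illustrative only):
       //
       //   jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
       //     jint old = *dest;                                   // done atomically
       //     if (old == compare_value) *dest = exchange_value;   // by the cas
       //     return old;
       //   }
       //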
 639   address generate_atomic_cmpxchg() {
 640     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 641     address start = __ pc();
 642 
 643     // cmpxchg(dest, compare_value, exchange_value)
 644     __ cas(O1, O2, O0);
 645     __ retl(false);
 646     __ delayed()->nop();
 647 
 648     return start;
 649   }
 650 
 651   // Implementation of jlong atomic_cmpxchg_long(jlong exchange_value, volatile jlong *dest, jlong compare_value)
 652   // used by Atomic::cmpxchg(volatile jlong *dest, jlong compare_value, jlong exchange_value)
 653   //
 654   // Arguments:
 655   //
 656   //      exchange_value: O1:O0
 657   //      dest:           O2
 658   //      compare_value:  O4:O3
 659   //
 660   // Results:
 661   //
 662   //     O1:O0: the value previously stored in dest
 663   //
 664   // Overwrites: G1,G2,G3
 665   //
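       // The two 32-bit halves of each jlong argument are packed into a single
       // 64-bit register before the casx, roughly:
       //
       //   O0 = (O0 << 32) | zero_extend(O1);   // exchange_value
       //   O3 = (O3 << 32) | zero_extend(O4);   // compare_value
       //   casx [O2], O3, O0                    // O0 <- previous *dest
       //
       // and the 64-bit result in O0 is split back into the O1:O0 pair on return.
       //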
 666   address generate_atomic_cmpxchg_long() {
 667     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 668     address start = __ pc();
 669 
 670     __ sllx(O0, 32, O0);
 671     __ srl(O1, 0, O1);
 672     __ or3(O0,O1,O0);      // O0 holds 64-bit value from exchange_value
 673     __ sllx(O3, 32, O3);
 674     __ srl(O4, 0, O4);
 675     __ or3(O3,O4,O3);     // O3 holds 64-bit value from compare_value
 676     __ casx(O2, O3, O0);
 677     __ srl(O0, 0, O1);    // unpacked return value in O1:O0
 678     __ retl(false);
 679     __ delayed()->srlx(O0, 32, O0);
 680 
 681     return start;
 682   }
 683 
 684 
 685   // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
 686   // used by Atomic::add(volatile jint* dest, jint add_value)
 687   //
 688   // Arguments:
 689   //
 690   //      add_value: O0   (e.g., +1 or -1)
 691   //      dest:      O1
 692   //
 693   // Results:
 694   //
 695   //     O0: the new value stored in dest
 696   //
 697   // Overwrites: O3
 698   //
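       // Rough C-style sketch of the retry loop below (illustrative only,
       // assuming a cas() helper with compare-and-swap semantics):
       //
       //   jint atomic_add(jint add_value, volatile jint* dest) {
       //     jint old;
       //     do { old = *dest; } while (cas(dest, old, old + add_value) != old);
       //     return old + add_value;
       //   }
       //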
 699   address generate_atomic_add() {
 700     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 701     address start = __ pc();
 702     __ BIND(_atomic_add_stub);
 703 
 704     Label retry;
 705     __ BIND(retry);
 706 
 707     __ lduw(O1, 0, O2);
 708     __ add(O0, O2, O3);
 709     __ cas(O1, O2, O3);
 710     __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 711     __ retl(false);
 712     __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 713 
 714     return start;
 715   }
 716   Label _atomic_add_stub;  // called from other stubs
 717 
 718 
 719   // Support for uint StubRoutines::Sparc::partial_subtype_check( Klass sub, Klass super );
 720   // Arguments:
 721   //
 722   //      ret  : O0, returned
 723   //      icc/xcc: set as O0 (depending on wordSize)
 724   //      sub  : O1, argument, not changed
 725   //      super: O2, argument, not changed
 726   //      raddr: O7, blown by call
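       //
       // The actual search is done by MacroAssembler::check_klass_subtype_slow_path(),
       // which scans the secondary supers array of 'sub' for 'super'; this stub mainly
       // adds the SPARC calling convention of reporting the result both in O0 and in
       // the condition codes.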
 727   address generate_partial_subtype_check() {
 728     __ align(CodeEntryAlignment);
 729     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
 730     address start = __ pc();
 731     Label miss;
 732 
 733     __ save_frame(0);
 734     Register Rret   = I0;
 735     Register Rsub   = I1;
 736     Register Rsuper = I2;
 737 
 738     Register L0_ary_len = L0;
 739     Register L1_ary_ptr = L1;
 740     Register L2_super   = L2;
 741     Register L3_index   = L3;
 742 
 743     __ check_klass_subtype_slow_path(Rsub, Rsuper,
 744                                      L0, L1, L2, L3,
 745                                      NULL, &miss);
 746 
 747     // Match falls through here.
 748     __ addcc(G0,0,Rret);        // set Z flags, Z result
 749 
 750     __ ret();                   // Result in Rret is zero; flags set to Z
 751     __ delayed()->restore();
 752 
 753     __ BIND(miss);
 754     __ addcc(G0,1,Rret);        // set NZ flags, NZ result
 755 
 756     __ ret();                   // Result in Rret is != 0; flags set to NZ
 757     __ delayed()->restore();
 758 
 759     return start;
 760   }
 761 
 762 
 763   // Called from MacroAssembler::verify_oop
 764   //
 765   address generate_verify_oop_subroutine() {
 766     StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
 767 
 768     address start = __ pc();
 769 
 770     __ verify_oop_subroutine();
 771 
 772     return start;
 773   }
 774 
 775 
 776   //
 777   // Verify that a register contains a clean 32-bit positive value
 778   // (high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
 779   //
 780   //  Input:
 781   //    Rint  -  32-bits value
 782   //    Rtmp  -  scratch
 783   //
 784   void assert_clean_int(Register Rint, Register Rtmp) {
 785   #if defined(ASSERT)
 786     __ signx(Rint, Rtmp);
 787     __ cmp(Rint, Rtmp);
 788     __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
 789   #endif
 790   }
 791 
 792   //
 793   //  Generate overlap test for array copy stubs
 794   //
 795   //  Input:
 796   //    O0    -  array1
 797   //    O1    -  array2
 798   //    O2    -  element count
 799   //
 800   //  Kills temps:  O3, O4
 801   //
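       // The test branches to the no-overlap target when a forward copy is safe,
       // i.e. (comparing as unsigned values) when
       //
       //   to <= from  ||  (to - from) >= (count << log2_elem_size)
       //
       // and falls through otherwise, so the caller can perform a backward copy.
       //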
 802   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
 803     assert(no_overlap_target != NULL, "must be generated");
 804     array_overlap_test(no_overlap_target, NULL, log2_elem_size);
 805   }
 806   void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
 807     array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
 808   }
 809   void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
 810     const Register from       = O0;
 811     const Register to         = O1;
 812     const Register count      = O2;
 813     const Register to_from    = O3; // to - from
 814     const Register byte_count = O4; // count << log2_elem_size
 815 
 816       __ subcc(to, from, to_from);
 817       __ sll_ptr(count, log2_elem_size, byte_count);
 818       if (NOLp == NULL)
 819         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
 820       else
 821         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
 822       __ delayed()->cmp(to_from, byte_count);
 823       if (NOLp == NULL)
 824         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
 825       else
 826         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
 827       __ delayed()->nop();
 828   }
 829 
 830 
 831   //
 832   // Generate main code for disjoint arraycopy
 833   //
 834   typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
 835                                               Label& L_loop, bool use_prefetch, bool use_bis);
 836 
 837   void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
 838                           int iter_size, StubGenerator::CopyLoopFunc copy_loop_func) {
 839     Label L_copy;
 840 
 841     assert(log2_elem_size <= 3, "the following code should be changed");
 842     int count_dec = 16>>log2_elem_size;
 843 
 844     int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
 845     assert(prefetch_dist < 4096, "invalid value");
 846     prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
 847     int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count
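         // Worked example with hypothetical values: for a prefetch distance of 512
         // bytes, iter_size == 16 and log2_elem_size == 0 (byte copy), prefetch_dist
         // stays 512 and prefetch_count == 512, i.e. prefetching runs 512 elements
         // ahead of the copy loop.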
 848 
 849     if (UseBlockCopy) {
 850       Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;
 851 
 852       // 64 bytes tail + bytes copied in one loop iteration
 853       int tail_size = 64 + iter_size;
 854       int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
 855       // Use BIS copy only for big arrays since it requires membar.
 856       __ set(block_copy_count, O4);
 857       __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
 858       // This code is for disjoint source and destination:
 859       //   to <= from || to >= from+count
 860       // but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
 861       __ sub(from, to, O4);
 862       __ srax(O4, 4, O4); // divide by 16 since the following short branch has only 5 bits for imm.
 863       __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);
 864 
 865       __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
 866       // BIS should not be used to copy tail (64 bytes+iter_size)
 867       // to avoid zeroing of following values.
 868       __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0
 869 
 870       if (prefetch_count > 0) { // rounded up to one iteration count
 871         // Do prefetching only if copy size is bigger
 872         // than prefetch distance.
 873         __ set(prefetch_count, O4);
 874         __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
 875         __ sub(count, O4, count);
 876 
 877         (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
 878         __ set(prefetch_count, O4);
 879         __ add(count, O4, count);
 880 
 881       } // prefetch_count > 0
 882 
 883       (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
 884       __ add(count, (tail_size>>log2_elem_size), count); // restore count
 885 
 886       __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
 887       // BIS needs membar.
 888       __ membar(Assembler::StoreLoad);
 889       // Copy tail
 890       __ ba_short(L_copy);
 891 
 892       __ BIND(L_skip_block_copy);
 893     } // UseBlockCopy
 894 
 895     if (prefetch_count > 0) { // rounded up to one iteration count
 896       // Do prefetching only if copy size is bigger
 897       // than prefetch distance.
 898       __ set(prefetch_count, O4);
 899       __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
 900       __ sub(count, O4, count);
 901 
 902       Label L_copy_prefetch;
 903       (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
 904       __ set(prefetch_count, O4);
 905       __ add(count, O4, count);
 906 
 907     } // prefetch_count > 0
 908 
 909     (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
 910   }
 911 
 912 
 913 
 914   //
 915   // Helper methods for copy_16_bytes_forward_with_shift()
 916   //
 917   void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
 918                                 Label& L_loop, bool use_prefetch, bool use_bis) {
 919 
 920     const Register left_shift  = G1; // left  shift bit counter
 921     const Register right_shift = G5; // right shift bit counter
 922 
 923     __ align(OptoLoopAlignment);
 924     __ BIND(L_loop);
 925     if (use_prefetch) {
 926       if (ArraycopySrcPrefetchDistance > 0) {
 927         __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
 928       }
 929       if (ArraycopyDstPrefetchDistance > 0) {
 930         __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
 931       }
 932     }
 933     __ ldx(from, 0, O4);
 934     __ ldx(from, 8, G4);
 935     __ inc(to, 16);
 936     __ inc(from, 16);
 937     __ deccc(count, count_dec); // Can we do next iteration after this one?
 938     __ srlx(O4, right_shift, G3);
 939     __ bset(G3, O3);
 940     __ sllx(O4, left_shift,  O4);
 941     __ srlx(G4, right_shift, G3);
 942     __ bset(G3, O4);
 943     if (use_bis) {
 944       __ stxa(O3, to, -16);
 945       __ stxa(O4, to, -8);
 946     } else {
 947       __ stx(O3, to, -16);
 948       __ stx(O4, to, -8);
 949     }
 950     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
 951     __ delayed()->sllx(G4, left_shift,  O3);
 952   }
 953 
 954   // Copy big chunks forward with shift
 955   //
 956   // Inputs:
 957   //   from      - source arrays
 958   //   to        - destination array aligned to 8-bytes
 959   //   count     - elements count to copy >= the count equivalent to 16 bytes
 960   //   count_dec - elements count's decrement equivalent to 16 bytes
 961   //   L_copy_bytes - copy exit label
 962   //
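       // Each iteration of the shifted loop merges two aligned 8-byte loads into
       // aligned 8-byte stores; per stored word this computes, in effect,
       //
       //   dest_word = (src_word[i] << left_shift) | (src_word[i+1] >> right_shift)
       //
       // where the shift amounts are derived from the misalignment of 'from'.
       //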
 963   void copy_16_bytes_forward_with_shift(Register from, Register to,
 964                      Register count, int log2_elem_size, Label& L_copy_bytes) {
 965     Label L_aligned_copy, L_copy_last_bytes;
 966     assert(log2_elem_size <= 3, "the following code should be changed");
 967     int count_dec = 16>>log2_elem_size;
 968 
 969     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
 970     __ andcc(from, 7, G1); // misaligned bytes
 971     __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
 972     __ delayed()->nop();
 973 
 974     const Register left_shift  = G1; // left  shift bit counter
 975     const Register right_shift = G5; // right shift bit counter
 976 
 977     __ sll(G1, LogBitsPerByte, left_shift);
 978     __ mov(64, right_shift);
 979     __ sub(right_shift, left_shift, right_shift);
 980 
 981     //
 982     // Load 2 aligned 8-bytes chunks and use one from previous iteration
 983     // to form 2 aligned 8-bytes chunks to store.
 984     //
 985     __ dec(count, count_dec);   // Pre-decrement 'count'
 986     __ andn(from, 7, from);     // Align address
 987     __ ldx(from, 0, O3);
 988     __ inc(from, 8);
 989     __ sllx(O3, left_shift,  O3);
 990 
 991     disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop);
 992 
 993     __ inccc(count, count_dec>>1 ); // + 8 bytes
 994     __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
 995     __ delayed()->inc(count, count_dec>>1); // restore 'count'
 996 
 997     // copy 8 bytes, part of them already loaded in O3
 998     __ ldx(from, 0, O4);
 999     __ inc(to, 8);
1000     __ inc(from, 8);
1001     __ srlx(O4, right_shift, G3);
1002     __ bset(O3, G3);
1003     __ stx(G3, to, -8);
1004 
1005     __ BIND(L_copy_last_bytes);
1006     __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
1007     __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1008     __ delayed()->sub(from, right_shift, from);       // restore address
1009 
1010     __ BIND(L_aligned_copy);
1011   }
1012 
1013   // Copy big chunks backward with shift
1014   //
1015   // Inputs:
1016   //   end_from  - source arrays end address
1017   //   end_to    - destination array end address aligned to 8-bytes
1018   //   count     - elements count to copy >= the count equivalent to 16 bytes
1019   //   count_dec - elements count's decrement equivalent to 16 bytes
1020   //   L_aligned_copy - aligned copy exit label
1021   //   L_copy_bytes   - copy exit label
1022   //
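       // This is the mirror image of copy_16_bytes_forward_with_shift(): the arrays
       // are walked from their ends and each stored 8-byte word is assembled as
       //
       //   dest_word = (src_word[i-1] << left_shift) | (src_word[i] >> right_shift)
       //
       // so only aligned 8-byte loads and stores are issued despite the misalignment.
       //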
1023   void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
1024                      Register count, int count_dec,
1025                      Label& L_aligned_copy, Label& L_copy_bytes) {
1026     Label L_loop, L_copy_last_bytes;
1027 
1028     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
1029       __ andcc(end_from, 7, G1); // misaligned bytes
1030       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1031       __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'
1032 
1033     const Register left_shift  = G1; // left  shift bit counter
1034     const Register right_shift = G5; // right shift bit counter
1035 
1036       __ sll(G1, LogBitsPerByte, left_shift);
1037       __ mov(64, right_shift);
1038       __ sub(right_shift, left_shift, right_shift);
1039 
1040     //
1041     // Load 2 aligned 8-bytes chunks and use one from previous iteration
1042     // to form 2 aligned 8-bytes chunks to store.
1043     //
1044       __ andn(end_from, 7, end_from);     // Align address
1045       __ ldx(end_from, 0, O3);
1046       __ align(OptoLoopAlignment);
1047     __ BIND(L_loop);
1048       __ ldx(end_from, -8, O4);
1049       __ deccc(count, count_dec); // Can we do next iteration after this one?
1050       __ ldx(end_from, -16, G4);
1051       __ dec(end_to, 16);
1052       __ dec(end_from, 16);
1053       __ srlx(O3, right_shift, O3);
1054       __ sllx(O4, left_shift,  G3);
1055       __ bset(G3, O3);
1056       __ stx(O3, end_to, 8);
1057       __ srlx(O4, right_shift, O4);
1058       __ sllx(G4, left_shift,  G3);
1059       __ bset(G3, O4);
1060       __ stx(O4, end_to, 0);
1061       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1062       __ delayed()->mov(G4, O3);
1063 
1064       __ inccc(count, count_dec>>1 ); // + 8 bytes
1065       __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1066       __ delayed()->inc(count, count_dec>>1); // restore 'count'
1067 
1068       // copy 8 bytes, part of them already loaded in O3
1069       __ ldx(end_from, -8, O4);
1070       __ dec(end_to, 8);
1071       __ dec(end_from, 8);
1072       __ srlx(O3, right_shift, O3);
1073       __ sllx(O4, left_shift,  G3);
1074       __ bset(O3, G3);
1075       __ stx(G3, end_to, 0);
1076 
1077     __ BIND(L_copy_last_bytes);
1078       __ srl(left_shift, LogBitsPerByte, left_shift);    // misaligned bytes
1079       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1080       __ delayed()->add(end_from, left_shift, end_from); // restore address
1081   }
1082 
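       // Common error exit for copy stubs reachable from Unsafe.copyMemory: if such a
       // copy faults on an unmapped page, execution continues here (see the
       // UnsafeCopyMemoryMark uses below), the block-init ASI state is restored if
       // block copy was in use, and 0 is returned.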
1083   address generate_unsafecopy_common_error_exit() {
1084     address start_pc = __ pc();
1085     if (UseBlockCopy) {
1086       __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
1087       __ membar(Assembler::StoreLoad);
1088     }
1089     __ retl();
1090     __ delayed()->mov(G0, O0); // return 0
1091     return start_pc;
1092   }
1093 
1094   //
1095   //  Generate stub for disjoint byte copy.  If "aligned" is true, the
1096   //  "from" and "to" addresses are assumed to be heapword aligned.
1097   //
1098   // Arguments for generated stub:
1099   //      from:  O0
1100   //      to:    O1
1101   //      count: O2 treated as signed
1102   //
1103   address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
1104     __ align(CodeEntryAlignment);
1105     StubCodeMark mark(this, "StubRoutines", name);
1106     address start = __ pc();
1107 
1108     Label L_skip_alignment, L_align;
1109     Label L_copy_byte, L_copy_byte_loop, L_exit;
1110 
1111     const Register from      = O0;   // source array address
1112     const Register to        = O1;   // destination array address
1113     const Register count     = O2;   // elements count
1114     const Register offset    = O5;   // offset from start of arrays
1115     // O3, O4, G3, G4 are used as temp registers
1116 
1117     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1118 
1119     if (entry != NULL) {
1120       *entry = __ pc();
1121       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1122       BLOCK_COMMENT("Entry:");
1123     }
1124 
1125     {
1126       // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
1127       UnsafeCopyMemoryMark ucmm(this, !aligned, false);
1128 
1129       // for short arrays, just do single element copy
1130       __ cmp(count, 23); // 16 + 7
1131       __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1132       __ delayed()->mov(G0, offset);
1133 
1134       if (aligned) {
1135         // 'aligned' == true when it is known statically during compilation
1136         // of this arraycopy call site that both 'from' and 'to' addresses
1137         // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1138         //
1139         // Aligned arrays have 4-byte alignment in the 32-bit VM
1140         // and 8-byte alignment in the 64-bit VM, so there is nothing to do here in the 64-bit VM.
1141         //
1142       } else {
1143         // copy bytes to align 'to' on 8 byte boundary
1144         __ andcc(to, 7, G1); // misaligned bytes
1145         __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1146         __ delayed()->neg(G1);
1147         __ inc(G1, 8);       // bytes to copy to reach the next 8-byte boundary
1148         __ sub(count, G1, count);
1149       __ BIND(L_align);
1150         __ ldub(from, 0, O3);
1151         __ deccc(G1);
1152         __ inc(from);
1153         __ stb(O3, to, 0);
1154         __ br(Assembler::notZero, false, Assembler::pt, L_align);
1155         __ delayed()->inc(to);
1156       __ BIND(L_skip_alignment);
1157       }
1158       if (!aligned) {
1159         // Copy with shift 16 bytes per iteration if arrays do not have
1160         // the same alignment mod 8, otherwise fall through to the next
1161         // code for aligned copy.
1162         // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1163         // Also jump over aligned copy after the copy with shift completed.
1164 
1165         copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
1166       }
1167 
1168       // Both arrays are 8-byte aligned; copy 16 bytes at a time
1169       __ and3(count, 7, G4); // Save count
1170       __ srl(count, 3, count);
1171       generate_disjoint_long_copy_core(aligned);
1172       __ mov(G4, count);     // Restore count
1173 
1174       // copy trailing bytes
1175       __ BIND(L_copy_byte);
1176         __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1177         __ align(OptoLoopAlignment);
1178       __ BIND(L_copy_byte_loop);
1179         __ ldub(from, offset, O3);
1180         __ deccc(count);
1181         __ stb(O3, to, offset);
1182         __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
1183         __ delayed()->inc(offset);
1184     }
1185 
1186     __ BIND(L_exit);
1187       // O3, O4 are used as temp registers
1188       inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1189       __ retl();
1190       __ delayed()->mov(G0, O0); // return 0
1191     return start;
1192   }
1193 
1194   //
1195   //  Generate stub for conjoint byte copy.  If "aligned" is true, the
1196   //  "from" and "to" addresses are assumed to be heapword aligned.
1197   //
1198   // Arguments for generated stub:
1199   //      from:  O0
1200   //      to:    O1
1201   //      count: O2 treated as signed
1202   //
1203   address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
1204                                       address *entry, const char *name) {
1205     // Do reverse copy.
1206 
1207     __ align(CodeEntryAlignment);
1208     StubCodeMark mark(this, "StubRoutines", name);
1209     address start = __ pc();
1210 
1211     Label L_skip_alignment, L_align, L_aligned_copy;
1212     Label L_copy_byte, L_copy_byte_loop, L_exit;
1213 
1214     const Register from      = O0;   // source array address
1215     const Register to        = O1;   // destination array address
1216     const Register count     = O2;   // elements count
1217     const Register end_from  = from; // source array end address
1218     const Register end_to    = to;   // destination array end address
1219 
1220     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1221 
1222     if (entry != NULL) {
1223       *entry = __ pc();
1224       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1225       BLOCK_COMMENT("Entry:");
1226     }
1227 
1228     array_overlap_test(nooverlap_target, 0);
1229 
1230     {
1231       // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
1232       UnsafeCopyMemoryMark ucmm(this, !aligned, false);
1233 
1234       __ add(to, count, end_to);       // offset after last copied element
1235 
1236       // for short arrays, just do single element copy
1237       __ cmp(count, 23); // 16 + 7
1238       __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1239       __ delayed()->add(from, count, end_from);
1240 
1241       {
1242         // Align the ends of the arrays since they may not be aligned even
1243         // when the arrays themselves are aligned.
1244 
1245         // copy bytes to align 'end_to' on 8 byte boundary
1246         __ andcc(end_to, 7, G1); // misaligned bytes
1247         __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1248         __ delayed()->nop();
1249         __ sub(count, G1, count);
1250       __ BIND(L_align);
1251         __ dec(end_from);
1252         __ dec(end_to);
1253         __ ldub(end_from, 0, O3);
1254         __ deccc(G1);
1255         __ brx(Assembler::notZero, false, Assembler::pt, L_align);
1256         __ delayed()->stb(O3, end_to, 0);
1257       __ BIND(L_skip_alignment);
1258       }
1259       if (aligned) {
1260         // Both arrays are aligned to 8-bytes in 64-bits VM.
1261         // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1262         // in unaligned case.
1263         __ dec(count, 16);
1264       } else {
1265         // Copy with shift 16 bytes per iteration if arrays do not have
1266         // the same alignment mod 8, otherwise jump to the next
1267         // code for aligned copy (subtracting 16 from 'count' before the jump).
1268         // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1269         // Also jump over aligned copy after the copy with shift completed.
1270 
1271        copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
1272                                           L_aligned_copy, L_copy_byte);
1273       }
1274       // copy 4 elements (16 bytes) at a time
1275         __ align(OptoLoopAlignment);
1276       __ BIND(L_aligned_copy);
1277         __ dec(end_from, 16);
1278         __ ldx(end_from, 8, O3);
1279         __ ldx(end_from, 0, O4);
1280         __ dec(end_to, 16);
1281         __ deccc(count, 16);
1282         __ stx(O3, end_to, 8);
1283         __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1284         __ delayed()->stx(O4, end_to, 0);
1285         __ inc(count, 16);
1286 
1287       // copy 1 element (1 byte) at a time
1288       __ BIND(L_copy_byte);
1289         __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1290         __ align(OptoLoopAlignment);
1291       __ BIND(L_copy_byte_loop);
1292         __ dec(end_from);
1293         __ dec(end_to);
1294         __ ldub(end_from, 0, O4);
1295         __ deccc(count);
1296         __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
1297         __ delayed()->stb(O4, end_to, 0);
1298     }
1299 
1300     __ BIND(L_exit);
1301     // O3, O4 are used as temp registers
1302     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1303     __ retl();
1304     __ delayed()->mov(G0, O0); // return 0
1305     return start;
1306   }
1307 
1308   //
1309   //  Generate stub for disjoint short copy.  If "aligned" is true, the
1310   //  "from" and "to" addresses are assumed to be heapword aligned.
1311   //
1312   // Arguments for generated stub:
1313   //      from:  O0
1314   //      to:    O1
1315   //      count: O2 treated as signed
1316   //
1317   address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
1318     __ align(CodeEntryAlignment);
1319     StubCodeMark mark(this, "StubRoutines", name);
1320     address start = __ pc();
1321 
1322     Label L_skip_alignment, L_skip_alignment2;
1323     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1324 
1325     const Register from      = O0;   // source array address
1326     const Register to        = O1;   // destination array address
1327     const Register count     = O2;   // elements count
1328     const Register offset    = O5;   // offset from start of arrays
1329     // O3, O4, G3, G4 are used as temp registers
1330 
1331     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1332 
1333     if (entry != NULL) {
1334       *entry = __ pc();
1335       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1336       BLOCK_COMMENT("Entry:");
1337     }
1338 
1339     {
1340       // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
1341       UnsafeCopyMemoryMark ucmm(this, !aligned, false);
1342       // for short arrays, just do single element copy
1343       __ cmp(count, 11); // 8 + 3  (22 bytes)
1344       __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1345       __ delayed()->mov(G0, offset);
1346 
1347       if (aligned) {
1348         // 'aligned' == true when it is known statically during compilation
1349         // of this arraycopy call site that both 'from' and 'to' addresses
1350         // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1351         //
1352         // Aligned arrays have 4-byte alignment in the 32-bit VM
1353         // and 8-byte alignment in the 64-bit VM.
1354         //
1355       } else {
1356         // copy 1 element if necessary to align 'to' on a 4-byte boundary
1357         __ andcc(to, 3, G0);
1358         __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1359         __ delayed()->lduh(from, 0, O3);
1360         __ inc(from, 2);
1361         __ inc(to, 2);
1362         __ dec(count);
1363         __ sth(O3, to, -2);
1364       __ BIND(L_skip_alignment);
1365 
1366         // copy 2 elements to align 'to' on an 8-byte boundary
1367         __ andcc(to, 7, G0);
1368         __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1369         __ delayed()->lduh(from, 0, O3);
1370         __ dec(count, 2);
1371         __ lduh(from, 2, O4);
1372         __ inc(from, 4);
1373         __ inc(to, 4);
1374         __ sth(O3, to, -4);
1375         __ sth(O4, to, -2);
1376       __ BIND(L_skip_alignment2);
1377       }
1378       if (!aligned) {
1379         // Copy with shift 16 bytes per iteration if arrays do not have
1380         // the same alignment mod 8, otherwise fall through to the next
1381         // code for aligned copy.
1382         // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1383         // Also jump over aligned copy after the copy with shift completed.
1384 
1385         copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
1386       }
1387 
1388       // Both arrays are 8-byte aligned; copy 16 bytes at a time
1389         __ and3(count, 3, G4); // Save
1390         __ srl(count, 2, count);
1391        generate_disjoint_long_copy_core(aligned);
1392         __ mov(G4, count); // restore
1393 
1394       // copy 1 element at a time
1395       __ BIND(L_copy_2_bytes);
1396         __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1397         __ align(OptoLoopAlignment);
1398       __ BIND(L_copy_2_bytes_loop);
1399         __ lduh(from, offset, O3);
1400         __ deccc(count);
1401         __ sth(O3, to, offset);
1402         __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
1403         __ delayed()->inc(offset, 2);
1404     }
1405 
1406     __ BIND(L_exit);
1407       // O3, O4 are used as temp registers
1408       inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1409       __ retl();
1410       __ delayed()->mov(G0, O0); // return 0
1411     return start;
1412   }
1413 
1414   //
1415   //  Generate stub for disjoint array fill (bytes, shorts, or ints).  If "aligned"
1416   //  is true, the "to" address is assumed to be heapword aligned.
1417   //
1418   // Arguments for generated stub:
1419   //      to:    O0
1420   //      value: O1
1421   //      count: O2 treated as signed
1422   //
1423   address generate_fill(BasicType t, bool aligned, const char* name) {
1424     __ align(CodeEntryAlignment);
1425     StubCodeMark mark(this, "StubRoutines", name);
1426     address start = __ pc();
1427 
    const Register to        = O0;   // destination array address
1429     const Register value     = O1;   // fill value
1430     const Register count     = O2;   // elements count
1431     // O3 is used as a temp register
1432 
1433     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1434 
1435     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
1436     Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
1437 
1438     int shift = -1;
1439     switch (t) {
      case T_BYTE:
        shift = 2;
        break;
      case T_SHORT:
        shift = 1;
        break;
      case T_INT:
        shift = 0;
        break;
1449       default: ShouldNotReachHere();
1450     }
1451 
1452     BLOCK_COMMENT("Entry:");
1453 
1454     if (t == T_BYTE) {
      // Zero extend value and replicate it into both bytes of the low halfword
1456       __ and3(value, 0xff, value);
1457       __ sllx(value, 8, O3);
1458       __ or3(value, O3, value);
1459     }
1460     if (t == T_SHORT) {
1461       // Zero extend value
1462       __ sllx(value, 48, value);
1463       __ srlx(value, 48, value);
1464     }
1465     if (t == T_BYTE || t == T_SHORT) {
1466       __ sllx(value, 16, O3);
1467       __ or3(value, O3, value);
1468     }
1469 
1470     __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
1471     __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
1472     __ delayed()->andcc(count, 1, G0);
1473 
1474     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
1475       // align source address at 4 bytes address boundary
1476       if (t == T_BYTE) {
        // One-byte misalignment happens only for byte arrays
1478         __ andcc(to, 1, G0);
1479         __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
1480         __ delayed()->nop();
1481         __ stb(value, to, 0);
1482         __ inc(to, 1);
1483         __ dec(count, 1);
1484         __ BIND(L_skip_align1);
1485       }
      // Two-byte misalignment happens only for byte and short (char) arrays
1487       __ andcc(to, 2, G0);
1488       __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
1489       __ delayed()->nop();
1490       __ sth(value, to, 0);
1491       __ inc(to, 2);
1492       __ dec(count, 1 << (shift - 1));
1493       __ BIND(L_skip_align2);
1494     }
1495     if (!aligned) {
      // align to 8 bytes; we know we are 4-byte aligned to start
1497       __ andcc(to, 7, G0);
1498       __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
1499       __ delayed()->nop();
1500       __ stw(value, to, 0);
1501       __ inc(to, 4);
1502       __ dec(count, 1 << shift);
1503       __ BIND(L_fill_32_bytes);
1504     }
1505 
1506     if (t == T_INT) {
1507       // Zero extend value
1508       __ srl(value, 0, value);
1509     }
1510     if (t == T_BYTE || t == T_SHORT || t == T_INT) {
1511       __ sllx(value, 32, O3);
1512       __ or3(value, O3, value);
1513     }
1514 
1515     Label L_check_fill_8_bytes;
1516     // Fill 32-byte chunks
1517     __ subcc(count, 8 << shift, count);
1518     __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
1519     __ delayed()->nop();
1520 
1521     Label L_fill_32_bytes_loop, L_fill_4_bytes;
1522     __ align(16);
1523     __ BIND(L_fill_32_bytes_loop);
1524 
1525     __ stx(value, to, 0);
1526     __ stx(value, to, 8);
1527     __ stx(value, to, 16);
1528     __ stx(value, to, 24);
1529 
1530     __ subcc(count, 8 << shift, count);
1531     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
1532     __ delayed()->add(to, 32, to);
1533 
1534     __ BIND(L_check_fill_8_bytes);
1535     __ addcc(count, 8 << shift, count);
1536     __ brx(Assembler::zero, false, Assembler::pn, L_exit);
1537     __ delayed()->subcc(count, 1 << (shift + 1), count);
1538     __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
1539     __ delayed()->andcc(count, 1<<shift, G0);
1540 
1541     //
1542     // length is too short, just fill 8 bytes at a time
1543     //
1544     Label L_fill_8_bytes_loop;
1545     __ BIND(L_fill_8_bytes_loop);
1546     __ stx(value, to, 0);
1547     __ subcc(count, 1 << (shift + 1), count);
1548     __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
1549     __ delayed()->add(to, 8, to);
1550 
1551     // fill trailing 4 bytes
1552     __ andcc(count, 1<<shift, G0);  // in delay slot of branches
1553     if (t == T_INT) {
1554       __ BIND(L_fill_elements);
1555     }
1556     __ BIND(L_fill_4_bytes);
1557     __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
1558     if (t == T_BYTE || t == T_SHORT) {
1559       __ delayed()->andcc(count, 1<<(shift-1), G0);
1560     } else {
1561       __ delayed()->nop();
1562     }
1563     __ stw(value, to, 0);
1564     if (t == T_BYTE || t == T_SHORT) {
1565       __ inc(to, 4);
1566       // fill trailing 2 bytes
1567       __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
1568       __ BIND(L_fill_2_bytes);
1569       __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
1570       __ delayed()->andcc(count, 1, count);
1571       __ sth(value, to, 0);
1572       if (t == T_BYTE) {
1573         __ inc(to, 2);
1574         // fill trailing byte
1575         __ andcc(count, 1, count);  // in delay slot of branches
1576         __ BIND(L_fill_byte);
1577         __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1578         __ delayed()->nop();
1579         __ stb(value, to, 0);
1580       } else {
1581         __ BIND(L_fill_byte);
1582       }
1583     } else {
1584       __ BIND(L_fill_2_bytes);
1585     }
1586     __ BIND(L_exit);
1587     __ retl();
1588     __ delayed()->nop();
1589 
    // Handle fills of less than 8 bytes.  Int is handled elsewhere.
1591     if (t == T_BYTE) {
1592       __ BIND(L_fill_elements);
1593       Label L_fill_2, L_fill_4;
1594       // in delay slot __ andcc(count, 1, G0);
1595       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1596       __ delayed()->andcc(count, 2, G0);
1597       __ stb(value, to, 0);
1598       __ inc(to, 1);
1599       __ BIND(L_fill_2);
1600       __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
1601       __ delayed()->andcc(count, 4, G0);
1602       __ stb(value, to, 0);
1603       __ stb(value, to, 1);
1604       __ inc(to, 2);
1605       __ BIND(L_fill_4);
1606       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1607       __ delayed()->nop();
1608       __ stb(value, to, 0);
1609       __ stb(value, to, 1);
1610       __ stb(value, to, 2);
1611       __ retl();
1612       __ delayed()->stb(value, to, 3);
1613     }
1614 
1615     if (t == T_SHORT) {
1616       Label L_fill_2;
1617       __ BIND(L_fill_elements);
1618       // in delay slot __ andcc(count, 1, G0);
1619       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1620       __ delayed()->andcc(count, 2, G0);
1621       __ sth(value, to, 0);
1622       __ inc(to, 2);
1623       __ BIND(L_fill_2);
1624       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1625       __ delayed()->nop();
1626       __ sth(value, to, 0);
1627       __ retl();
1628       __ delayed()->sth(value, to, 2);
1629     }
1630     return start;
1631   }
1632 
1633   //
1634   //  Generate stub for conjoint short copy.  If "aligned" is true, the
1635   //  "from" and "to" addresses are assumed to be heapword aligned.
1636   //
1637   // Arguments for generated stub:
1638   //      from:  O0
1639   //      to:    O1
1640   //      count: O2 treated as signed
1641   //
1642   address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1643                                        address *entry, const char *name) {
1644     // Do reverse copy.
1645 
1646     __ align(CodeEntryAlignment);
1647     StubCodeMark mark(this, "StubRoutines", name);
1648     address start = __ pc();
1649 
1650     Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
1651     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1652 
1653     const Register from      = O0;   // source array address
1654     const Register to        = O1;   // destination array address
1655     const Register count     = O2;   // elements count
1656     const Register end_from  = from; // source array end address
1657     const Register end_to    = to;   // destination array end address
1658 
1659     const Register byte_count = O3;  // bytes count to copy
1660 
1661     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1662 
1663     if (entry != NULL) {
1664       *entry = __ pc();
1665       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1666       BLOCK_COMMENT("Entry:");
1667     }
1668 
1669     array_overlap_test(nooverlap_target, 1);
1670 
1671     {
1672       // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
1673       UnsafeCopyMemoryMark ucmm(this, !aligned, false);
1674 
1675       __ sllx(count, LogBytesPerShort, byte_count);
1676       __ add(to, byte_count, end_to);  // offset after last copied element
1677 
1678       // for short arrays, just do single element copy
1679       __ cmp(count, 11); // 8 + 3  (22 bytes)
1680       __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1681       __ delayed()->add(from, byte_count, end_from);
1682 
1683       {
        // Align the end of the arrays since it may not be aligned even
        // when the arrays themselves are aligned.

        // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
1688         __ andcc(end_to, 3, G0);
1689         __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1690         __ delayed()->lduh(end_from, -2, O3);
1691         __ dec(end_from, 2);
1692         __ dec(end_to, 2);
1693         __ dec(count);
1694         __ sth(O3, end_to, 0);
1695       __ BIND(L_skip_alignment);
1696 
1697         // copy 2 elements to align 'end_to' on an 8 byte boundary
1698         __ andcc(end_to, 7, G0);
1699         __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1700         __ delayed()->lduh(end_from, -2, O3);
1701         __ dec(count, 2);
1702         __ lduh(end_from, -4, O4);
1703         __ dec(end_from, 4);
1704         __ dec(end_to, 4);
1705         __ sth(O3, end_to, 2);
1706         __ sth(O4, end_to, 0);
1707       __ BIND(L_skip_alignment2);
1708       }
1709       if (aligned) {
        // Both arrays are aligned to 8 bytes in a 64-bit VM.
        // The 'count' is decremented in copy_16_bytes_backward_with_shift()
        // in the unaligned case.
1713         __ dec(count, 8);
1714       } else {
1715         // Copy with shift 16 bytes per iteration if arrays do not have
1716         // the same alignment mod 8, otherwise jump to the next
        // code for aligned copy (subtracting 8 from 'count' before the jump).
        // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
        // Also jump over the aligned copy once the copy with shift has completed.
1720 
1721         copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
1722                                         L_aligned_copy, L_copy_2_bytes);
1723       }
1724       // copy 4 elements (16 bytes) at a time
1725         __ align(OptoLoopAlignment);
1726       __ BIND(L_aligned_copy);
1727         __ dec(end_from, 16);
1728         __ ldx(end_from, 8, O3);
1729         __ ldx(end_from, 0, O4);
1730         __ dec(end_to, 16);
1731         __ deccc(count, 8);
1732         __ stx(O3, end_to, 8);
1733         __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1734         __ delayed()->stx(O4, end_to, 0);
1735         __ inc(count, 8);
1736 
1737       // copy 1 element (2 bytes) at a time
1738       __ BIND(L_copy_2_bytes);
1739         __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1740       __ BIND(L_copy_2_bytes_loop);
1741         __ dec(end_from, 2);
1742         __ dec(end_to, 2);
1743         __ lduh(end_from, 0, O4);
1744         __ deccc(count);
1745         __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
1746         __ delayed()->sth(O4, end_to, 0);
1747     }
1748     __ BIND(L_exit);
1749     // O3, O4 are used as temp registers
1750     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1751     __ retl();
1752     __ delayed()->mov(G0, O0); // return 0
1753     return start;
1754   }
1755 
1756   //
1757   // Helper methods for generate_disjoint_int_copy_core()
1758   //
1759   void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
1760                           Label& L_loop, bool use_prefetch, bool use_bis) {
1761 
1762     __ align(OptoLoopAlignment);
1763     __ BIND(L_loop);
1764     if (use_prefetch) {
1765       if (ArraycopySrcPrefetchDistance > 0) {
1766         __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
1767       }
1768       if (ArraycopyDstPrefetchDistance > 0) {
1769         __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
1770       }
1771     }
1772     __ ldx(from, 4, O4);
1773     __ ldx(from, 12, G4);
1774     __ inc(to, 16);
1775     __ inc(from, 16);
1776     __ deccc(count, 4); // Can we do next iteration after this one?
1777 
1778     __ srlx(O4, 32, G3);
1779     __ bset(G3, O3);
1780     __ sllx(O4, 32, O4);
1781     __ srlx(G4, 32, G3);
1782     __ bset(G3, O4);
1783     if (use_bis) {
1784       __ stxa(O3, to, -16);
1785       __ stxa(O4, to, -8);
1786     } else {
1787       __ stx(O3, to, -16);
1788       __ stx(O4, to, -8);
1789     }
1790     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1791     __ delayed()->sllx(G4, 32,  O3);
1792 
1793   }
1794 
1795   //
1796   //  Generate core code for disjoint int copy (and oop copy on 32-bit).
1797   //  If "aligned" is true, the "from" and "to" addresses are assumed
1798   //  to be heapword aligned.
1799   //
1800   // Arguments:
1801   //      from:  O0
1802   //      to:    O1
1803   //      count: O2 treated as signed
1804   //
1805   void generate_disjoint_int_copy_core(bool aligned) {
1806 
1807     Label L_skip_alignment, L_aligned_copy;
1808     Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1809 
1810     const Register from      = O0;   // source array address
1811     const Register to        = O1;   // destination array address
1812     const Register count     = O2;   // elements count
1813     const Register offset    = O5;   // offset from start of arrays
1814     // O3, O4, G3, G4 are used as temp registers
1815 
1816     // 'aligned' == true when it is known statically during compilation
1817     // of this arraycopy call site that both 'from' and 'to' addresses
1818     // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1819     //
    // Aligned arrays have 4-byte alignment in a 32-bit VM
    // and 8-byte alignment in a 64-bit VM.
1822     //
1823     if (!aligned) {
1824       // The next check could be put under 'ifndef' since the code in
      // generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
1826 
1827       // for short arrays, just do single element copy
1828       __ cmp(count, 5); // 4 + 1 (20 bytes)
1829       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1830       __ delayed()->mov(G0, offset);
1831 
1832       // copy 1 element to align 'to' on an 8 byte boundary
1833       __ andcc(to, 7, G0);
1834       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1835       __ delayed()->ld(from, 0, O3);
1836       __ inc(from, 4);
1837       __ inc(to, 4);
1838       __ dec(count);
1839       __ st(O3, to, -4);
1840     __ BIND(L_skip_alignment);
1841 
    // if arrays have the same alignment mod 8, do a 4-element copy
1843       __ andcc(from, 7, G0);
1844       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1845       __ delayed()->ld(from, 0, O3);
1846 
1847     //
    // Load 2 aligned 8-byte chunks and use one from the previous iteration
    // to form 2 aligned 8-byte chunks to store.
    //
    // copy_16_bytes_forward_with_shift() is not used here since this
    // code is more efficient.
1853 
1854     // copy with shift 4 elements (16 bytes) at a time
      __ dec(count, 4);   // The cmp at the beginning guarantees count >= 4
1856       __ sllx(O3, 32,  O3);
1857 
1858       disjoint_copy_core(from, to, count, 2, 16, &StubGenerator::copy_16_bytes_loop);
1859 
1860       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
1861       __ delayed()->inc(count, 4); // restore 'count'
1862 
1863     __ BIND(L_aligned_copy);
1864     } // !aligned
1865 
1866     // copy 4 elements (16 bytes) at a time
1867       __ and3(count, 1, G4); // Save
1868       __ srl(count, 1, count);
      generate_disjoint_long_copy_core(aligned);
1870       __ mov(G4, count);     // Restore
1871 
1872     // copy 1 element at a time
1873     __ BIND(L_copy_4_bytes);
1874       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1875     __ BIND(L_copy_4_bytes_loop);
1876       __ ld(from, offset, O3);
1877       __ deccc(count);
1878       __ st(O3, to, offset);
1879       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
1880       __ delayed()->inc(offset, 4);
1881     __ BIND(L_exit);
1882   }
1883 
1884   //
1885   //  Generate stub for disjoint int copy.  If "aligned" is true, the
1886   //  "from" and "to" addresses are assumed to be heapword aligned.
1887   //
1888   // Arguments for generated stub:
1889   //      from:  O0
1890   //      to:    O1
1891   //      count: O2 treated as signed
1892   //
1893   address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) {
1894     __ align(CodeEntryAlignment);
1895     StubCodeMark mark(this, "StubRoutines", name);
1896     address start = __ pc();
1897 
1898     const Register count = O2;
1899     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1900 
1901     if (entry != NULL) {
1902       *entry = __ pc();
1903       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1904       BLOCK_COMMENT("Entry:");
1905     }
1906     {
1907       // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
1908       UnsafeCopyMemoryMark ucmm(this, !aligned, false);
1909       generate_disjoint_int_copy_core(aligned);
1910     }
1911     // O3, O4 are used as temp registers
1912     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
1913     __ retl();
1914     __ delayed()->mov(G0, O0); // return 0
1915     return start;
1916   }
1917 
1918   //
1919   //  Generate core code for conjoint int copy (and oop copy on 32-bit).
1920   //  If "aligned" is true, the "from" and "to" addresses are assumed
1921   //  to be heapword aligned.
1922   //
1923   // Arguments:
1924   //      from:  O0
1925   //      to:    O1
1926   //      count: O2 treated as signed
1927   //
1928   void generate_conjoint_int_copy_core(bool aligned) {
1929     // Do reverse copy.
1930 
1931     Label L_skip_alignment, L_aligned_copy;
1932     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1933 
1934     const Register from      = O0;   // source array address
1935     const Register to        = O1;   // destination array address
1936     const Register count     = O2;   // elements count
1937     const Register end_from  = from; // source array end address
1938     const Register end_to    = to;   // destination array end address
1939     // O3, O4, O5, G3 are used as temp registers
1940 
1941     const Register byte_count = O3;  // bytes count to copy
1942 
1943       __ sllx(count, LogBytesPerInt, byte_count);
1944       __ add(to, byte_count, end_to); // offset after last copied element
1945 
1946       __ cmp(count, 5); // for short arrays, just do single element copy
1947       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1948       __ delayed()->add(from, byte_count, end_from);
1949 
1950     // copy 1 element to align 'to' on an 8 byte boundary
1951       __ andcc(end_to, 7, G0);
1952       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1953       __ delayed()->nop();
1954       __ dec(count);
1955       __ dec(end_from, 4);
1956       __ dec(end_to,   4);
1957       __ ld(end_from, 0, O4);
1958       __ st(O4, end_to, 0);
1959     __ BIND(L_skip_alignment);
1960 
    // Check if 'end_from' and 'end_to' have the same alignment.
1962       __ andcc(end_from, 7, G0);
1963       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
      __ delayed()->dec(count, 4); // The cmp at the start guarantees cnt >= 4
1965 
1966     // copy with shift 4 elements (16 bytes) at a time
1967     //
    // Load 2 aligned 8-byte chunks and use one from the previous iteration
    // to form 2 aligned 8-byte chunks to store.
1970     //
1971       __ ldx(end_from, -4, O3);
1972       __ align(OptoLoopAlignment);
1973     __ BIND(L_copy_16_bytes);
1974       __ ldx(end_from, -12, O4);
1975       __ deccc(count, 4);
1976       __ ldx(end_from, -20, O5);
1977       __ dec(end_to, 16);
1978       __ dec(end_from, 16);
1979       __ srlx(O3, 32, O3);
1980       __ sllx(O4, 32, G3);
1981       __ bset(G3, O3);
1982       __ stx(O3, end_to, 8);
1983       __ srlx(O4, 32, O4);
1984       __ sllx(O5, 32, G3);
1985       __ bset(O4, G3);
1986       __ stx(G3, end_to, 0);
1987       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
1988       __ delayed()->mov(O5, O3);
1989 
1990       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
1991       __ delayed()->inc(count, 4);
1992 
1993     // copy 4 elements (16 bytes) at a time
1994       __ align(OptoLoopAlignment);
1995     __ BIND(L_aligned_copy);
1996       __ dec(end_from, 16);
1997       __ ldx(end_from, 8, O3);
1998       __ ldx(end_from, 0, O4);
1999       __ dec(end_to, 16);
2000       __ deccc(count, 4);
2001       __ stx(O3, end_to, 8);
2002       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
2003       __ delayed()->stx(O4, end_to, 0);
2004       __ inc(count, 4);
2005 
2006     // copy 1 element (4 bytes) at a time
2007     __ BIND(L_copy_4_bytes);
2008       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
2009     __ BIND(L_copy_4_bytes_loop);
2010       __ dec(end_from, 4);
2011       __ dec(end_to, 4);
2012       __ ld(end_from, 0, O4);
2013       __ deccc(count);
2014       __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
2015       __ delayed()->st(O4, end_to, 0);
2016     __ BIND(L_exit);
2017   }
2018 
2019   //
2020   //  Generate stub for conjoint int copy.  If "aligned" is true, the
2021   //  "from" and "to" addresses are assumed to be heapword aligned.
2022   //
2023   // Arguments for generated stub:
2024   //      from:  O0
2025   //      to:    O1
2026   //      count: O2 treated as signed
2027   //
2028   address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
2029                                      address *entry, const char *name) {
2030     __ align(CodeEntryAlignment);
2031     StubCodeMark mark(this, "StubRoutines", name);
2032     address start = __ pc();
2033 
2034     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2035 
2036     if (entry != NULL) {
2037       *entry = __ pc();
2038       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2039       BLOCK_COMMENT("Entry:");
2040     }
2041 
2042     array_overlap_test(nooverlap_target, 2);
2043     {
2044       // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
2045       UnsafeCopyMemoryMark ucmm(this, !aligned, false);
2046       generate_conjoint_int_copy_core(aligned);
2047     }
2048     // O3, O4 are used as temp registers
2049     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2050     __ retl();
2051     __ delayed()->mov(G0, O0); // return 0
2052     return start;
2053   }
2054 
2055   //
2056   // Helper methods for generate_disjoint_long_copy_core()
2057   //
2058   void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
2059                           Label& L_loop, bool use_prefetch, bool use_bis) {
2060     __ align(OptoLoopAlignment);
2061     __ BIND(L_loop);
2062     for (int off = 0; off < 64; off += 16) {
2063       if (use_prefetch && (off & 31) == 0) {
2064         if (ArraycopySrcPrefetchDistance > 0) {
2065           __ prefetch(from, ArraycopySrcPrefetchDistance+off, Assembler::severalReads);
2066         }
2067         if (ArraycopyDstPrefetchDistance > 0) {
2068           __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads);
2069         }
2070       }
2071       __ ldx(from,  off+0, O4);
2072       __ ldx(from,  off+8, O5);
2073       if (use_bis) {
2074         __ stxa(O4, to,  off+0);
2075         __ stxa(O5, to,  off+8);
2076       } else {
2077         __ stx(O4, to,  off+0);
2078         __ stx(O5, to,  off+8);
2079       }
2080     }
2081     __ deccc(count, 8);
2082     __ inc(from, 64);
2083     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
2084     __ delayed()->inc(to, 64);
2085   }
2086 
2087   //
2088   //  Generate core code for disjoint long copy (and oop copy on 64-bit).
2089   //  "aligned" is ignored, because we must make the stronger
2090   //  assumption that both addresses are always 64-bit aligned.
2091   //
2092   // Arguments:
2093   //      from:  O0
2094   //      to:    O1
2095   //      count: O2 treated as signed
2096   //
2097   // count -= 2;
2098   // if ( count >= 0 ) { // >= 2 elements
2099   //   if ( count > 6) { // >= 8 elements
2100   //     count -= 6; // original count - 8
2101   //     do {
2102   //       copy_8_elements;
2103   //       count -= 8;
2104   //     } while ( count >= 0 );
2105   //     count += 6;
2106   //   }
2107   //   if ( count >= 0 ) { // >= 2 elements
2108   //     do {
2109   //       copy_2_elements;
2110   //     } while ( (count=count-2) >= 0 );
2111   //   }
2112   // }
2113   // count += 2;
2114   // if ( count != 0 ) { // 1 element left
2115   //   copy_1_element;
2116   // }
2117   //
2118   void generate_disjoint_long_copy_core(bool aligned) {
2119     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2120     const Register from    = O0;  // source array address
2121     const Register to      = O1;  // destination array address
2122     const Register count   = O2;  // elements count
2123     const Register offset0 = O4;  // element offset
2124     const Register offset8 = O5;  // next element offset
2125 
2126     __ deccc(count, 2);
2127     __ mov(G0, offset0);   // offset from start of arrays (0)
2128     __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2129     __ delayed()->add(offset0, 8, offset8);
2130 
    // Copy in 64-byte chunks
2132 
2133     const Register from64 = O3;  // source address
2134     const Register to64   = G3;  // destination address
2135     __ subcc(count, 6, O3);
2136     __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
2137     __ delayed()->mov(to,   to64);
2138     // Now we can use O4(offset0), O5(offset8) as temps
2139     __ mov(O3, count);
2140     // count >= 0 (original count - 8)
2141     __ mov(from, from64);
2142 
2143     disjoint_copy_core(from64, to64, count, 3, 64, &StubGenerator::copy_64_bytes_loop);
2144 
2145       // Restore O4(offset0), O5(offset8)
2146       __ sub(from64, from, offset0);
2147       __ inccc(count, 6); // restore count
2148       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2149       __ delayed()->add(offset0, 8, offset8);
2150 
      // Copy in 16-byte chunks
2152       __ align(OptoLoopAlignment);
2153     __ BIND(L_copy_16_bytes);
2154       __ ldx(from, offset0, O3);
2155       __ ldx(from, offset8, G3);
2156       __ deccc(count, 2);
2157       __ stx(O3, to, offset0);
2158       __ inc(offset0, 16);
2159       __ stx(G3, to, offset8);
2160       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2161       __ delayed()->inc(offset8, 16);
2162 
2163       // Copy last 8 bytes
2164     __ BIND(L_copy_8_bytes);
2165       __ inccc(count, 2);
2166       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
2167       __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
2168       __ ldx(from, offset0, O3);
2169       __ stx(O3, to, offset0);
2170     __ BIND(L_exit);
2171   }
2172 
2173   //
2174   //  Generate stub for disjoint long copy.
2175   //  "aligned" is ignored, because we must make the stronger
2176   //  assumption that both addresses are always 64-bit aligned.
2177   //
2178   // Arguments for generated stub:
2179   //      from:  O0
2180   //      to:    O1
2181   //      count: O2 treated as signed
2182   //
2183   address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) {
2184     __ align(CodeEntryAlignment);
2185     StubCodeMark mark(this, "StubRoutines", name);
2186     address start = __ pc();
2187 
2188     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2189 
2190     if (entry != NULL) {
2191       *entry = __ pc();
2192       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2193       BLOCK_COMMENT("Entry:");
2194     }
2195 
2196     {
2197       // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
2198       UnsafeCopyMemoryMark ucmm(this, true, false);
2199       generate_disjoint_long_copy_core(aligned);
2200     }
2201     // O3, O4 are used as temp registers
2202     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2203     __ retl();
2204     __ delayed()->mov(G0, O0); // return 0
2205     return start;
2206   }
2207 
2208   //
2209   //  Generate core code for conjoint long copy (and oop copy on 64-bit).
2210   //  "aligned" is ignored, because we must make the stronger
2211   //  assumption that both addresses are always 64-bit aligned.
2212   //
2213   // Arguments:
2214   //      from:  O0
2215   //      to:    O1
2216   //      count: O2 treated as signed
2217   //
2218   void generate_conjoint_long_copy_core(bool aligned) {
2219     // Do reverse copy.
2220     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2221     const Register from    = O0;  // source array address
2222     const Register to      = O1;  // destination array address
2223     const Register count   = O2;  // elements count
2224     const Register offset8 = O4;  // element offset
2225     const Register offset0 = O5;  // previous element offset
2226 
2227       __ subcc(count, 1, count);
2228       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
2229       __ delayed()->sllx(count, LogBytesPerLong, offset8);
2230       __ sub(offset8, 8, offset0);
2231       __ align(OptoLoopAlignment);
2232     __ BIND(L_copy_16_bytes);
2233       __ ldx(from, offset8, O2);
2234       __ ldx(from, offset0, O3);
2235       __ stx(O2, to, offset8);
2236       __ deccc(offset8, 16);      // use offset8 as counter
2237       __ stx(O3, to, offset0);
2238       __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
2239       __ delayed()->dec(offset0, 16);
2240 
2241     __ BIND(L_copy_8_bytes);
2242       __ brx(Assembler::negative, false, Assembler::pn, L_exit );
2243       __ delayed()->nop();
2244       __ ldx(from, 0, O3);
2245       __ stx(O3, to, 0);
2246     __ BIND(L_exit);
2247   }
2248 
2249   //  Generate stub for conjoint long copy.
2250   //  "aligned" is ignored, because we must make the stronger
2251   //  assumption that both addresses are always 64-bit aligned.
2252   //
2253   // Arguments for generated stub:
2254   //      from:  O0
2255   //      to:    O1
2256   //      count: O2 treated as signed
2257   //
2258   address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2259                                       address *entry, const char *name) {
2260     __ align(CodeEntryAlignment);
2261     StubCodeMark mark(this, "StubRoutines", name);
2262     address start = __ pc();
2263 
2264     assert(aligned, "Should always be aligned");
2265 
2266     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2267 
2268     if (entry != NULL) {
2269       *entry = __ pc();
2270       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2271       BLOCK_COMMENT("Entry:");
2272     }
2273 
2274     array_overlap_test(nooverlap_target, 3);
2275     {
2276       // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
2277       UnsafeCopyMemoryMark ucmm(this, true, false);
2278       generate_conjoint_long_copy_core(aligned);
2279     }
2280     // O3, O4 are used as temp registers
2281     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2282     __ retl();
2283     __ delayed()->mov(G0, O0); // return 0
2284     return start;
2285   }
2286 
2287   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
2288   //  "from" and "to" addresses are assumed to be heapword aligned.
2289   //
2290   // Arguments for generated stub:
2291   //      from:  O0
2292   //      to:    O1
2293   //      count: O2 treated as signed
2294   //
2295   address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
2296                                      bool dest_uninitialized = false) {
2297 
2298     const Register from  = O0;  // source array address
2299     const Register to    = O1;  // destination array address
2300     const Register count = O2;  // elements count
2301 
2302     __ align(CodeEntryAlignment);
2303     StubCodeMark mark(this, "StubRoutines", name);
2304     address start = __ pc();
2305 
2306     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2307 
2308     if (entry != NULL) {
2309       *entry = __ pc();
2310       // caller can pass a 64-bit byte count here
2311       BLOCK_COMMENT("Entry:");
2312     }
2313 
2314     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
2315     if (dest_uninitialized) {
2316       decorators |= IS_DEST_UNINITIALIZED;
2317     }
2318     if (aligned) {
2319       decorators |= ARRAYCOPY_ALIGNED;
2320     }
2321 
2322     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2323     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count);
2324 
2325     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2326     if (UseCompressedOops) {
2327       generate_disjoint_int_copy_core(aligned);
2328     } else {
2329       generate_disjoint_long_copy_core(aligned);
2330     }
2331 
2332     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count);
2333 
2334     // O3, O4 are used as temp registers
2335     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2336     __ retl();
2337     __ delayed()->mov(G0, O0); // return 0
2338     return start;
2339   }
2340 
2341   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
2342   //  "from" and "to" addresses are assumed to be heapword aligned.
2343   //
2344   // Arguments for generated stub:
2345   //      from:  O0
2346   //      to:    O1
2347   //      count: O2 treated as signed
2348   //
2349   address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2350                                      address *entry, const char *name,
2351                                      bool dest_uninitialized = false) {
2352 
2353     const Register from  = O0;  // source array address
2354     const Register to    = O1;  // destination array address
2355     const Register count = O2;  // elements count
2356 
2357     __ align(CodeEntryAlignment);
2358     StubCodeMark mark(this, "StubRoutines", name);
2359     address start = __ pc();
2360 
2361     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2362 
2363     if (entry != NULL) {
2364       *entry = __ pc();
2365       // caller can pass a 64-bit byte count here
2366       BLOCK_COMMENT("Entry:");
2367     }
2368 
2369     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2370 
2371     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2372     if (dest_uninitialized) {
2373       decorators |= IS_DEST_UNINITIALIZED;
2374     }
2375     if (aligned) {
2376       decorators |= ARRAYCOPY_ALIGNED;
2377     }
2378 
2379     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2380     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count);
2381 
2382     if (UseCompressedOops) {
2383       generate_conjoint_int_copy_core(aligned);
2384     } else {
2385       generate_conjoint_long_copy_core(aligned);
2386     }
2387 
2388     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count);
2389 
2390     // O3, O4 are used as temp registers
2391     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2392     __ retl();
2393     __ delayed()->mov(G0, O0); // return 0
2394     return start;
2395   }
2396 
2397 
2398   // Helper for generating a dynamic type check.
2399   // Smashes only the given temp registers.
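  // Rough control flow (a sketch, not the exact instruction sequence):
  // the fast path compares sub_klass against super_klass using
  // super_check_offset; if that is inconclusive, a register window is saved
  // and check_klass_subtype_slow_path() scans the secondary supers.  On
  // success control branches to L_success, otherwise it falls through to
  // the local L_miss label.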
2400   void generate_type_check(Register sub_klass,
2401                            Register super_check_offset,
2402                            Register super_klass,
2403                            Register temp,
2404                            Label& L_success) {
2405     assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
2406 
2407     BLOCK_COMMENT("type_check:");
2408 
2409     Label L_miss, L_pop_to_miss;
2410 
2411     assert_clean_int(super_check_offset, temp);
2412 
2413     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
2414                                      &L_success, &L_miss, NULL,
2415                                      super_check_offset);
2416 
2417     BLOCK_COMMENT("type_check_slow_path:");
2418     __ save_frame(0);
2419     __ check_klass_subtype_slow_path(sub_klass->after_save(),
2420                                      super_klass->after_save(),
2421                                      L0, L1, L2, L4,
2422                                      NULL, &L_pop_to_miss);
2423     __ ba(L_success);
2424     __ delayed()->restore();
2425 
2426     __ bind(L_pop_to_miss);
2427     __ restore();
2428 
2429     // Fall through on failure!
2430     __ BIND(L_miss);
2431   }
2432 
2433 
2434   //  Generate stub for checked oop copy.
2435   //
2436   // Arguments for generated stub:
2437   //      from:  O0
2438   //      to:    O1
2439   //      count: O2 treated as signed
2440   //      ckoff: O3 (super_check_offset)
2441   //      ckval: O4 (super_klass)
2442   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
2443   //
2444   address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) {
2445 
2446     const Register O0_from   = O0;      // source array address
2447     const Register O1_to     = O1;      // destination array address
2448     const Register O2_count  = O2;      // elements count
2449     const Register O3_ckoff  = O3;      // super_check_offset
2450     const Register O4_ckval  = O4;      // super_klass
2451 
2452     const Register O5_offset = O5;      // loop var, with stride wordSize
2453     const Register G1_remain = G1;      // loop var, with stride -1
2454     const Register G3_oop    = G3;      // actual oop copied
2455     const Register G4_klass  = G4;      // oop._klass
2456     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
2457 
2458     __ align(CodeEntryAlignment);
2459     StubCodeMark mark(this, "StubRoutines", name);
2460     address start = __ pc();
2461 
2462 #ifdef ASSERT
2463     // We sometimes save a frame (see generate_type_check below).
2464     // If this will cause trouble, let's fail now instead of later.
2465     __ save_frame(0);
2466     __ restore();
2467 #endif
2468 
2469     assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
2470 
2471 #ifdef ASSERT
2472     // caller guarantees that the arrays really are different
2473     // otherwise, we would have to make conjoint checks
2474     { Label L;
2475       __ mov(O3, G1);           // spill: overlap test smashes O3
2476       __ mov(O4, G4);           // spill: overlap test smashes O4
2477       array_overlap_test(L, LogBytesPerHeapOop);
2478       __ stop("checkcast_copy within a single array");
2479       __ bind(L);
2480       __ mov(G1, O3);
2481       __ mov(G4, O4);
2482     }
2483 #endif //ASSERT
2484 
2485     if (entry != NULL) {
2486       *entry = __ pc();
2487       // caller can pass a 64-bit byte count here (from generic stub)
2488       BLOCK_COMMENT("Entry:");
2489     }
2490 
2491     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
2492     if (dest_uninitialized) {
2493       decorators |= IS_DEST_UNINITIALIZED;
2494     }
2495 
2496     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2497     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count);
2498 
2499     Label load_element, store_element, do_epilogue, fail, done;
2500     __ addcc(O2_count, 0, G1_remain);   // initialize loop index, and test it
2501     __ brx(Assembler::notZero, false, Assembler::pt, load_element);
2502     __ delayed()->mov(G0, O5_offset);   // offset from start of arrays
2503 
2504     // Empty array:  Nothing to do.
2505     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2506     __ retl();
2507     __ delayed()->set(0, O0);           // return 0 on (trivial) success
2508 
2509     // ======== begin loop ========
2510     // (Loop is rotated; its entry is load_element.)
2511     // Loop variables:
2512     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
2513     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
2514     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
2515     __ align(OptoLoopAlignment);
2516 
2517     __ BIND(store_element);
2518     __ deccc(G1_remain);                // decrement the count
2519     __ store_heap_oop(G3_oop, O1_to, O5_offset, noreg, AS_RAW); // store the oop
2520     __ inc(O5_offset, heapOopSize);     // step to next offset
2521     __ brx(Assembler::zero, true, Assembler::pt, do_epilogue);
    __ delayed()->set(0, O0);           // return 0 on success
2523 
2524     // ======== loop entry is here ========
2525     __ BIND(load_element);
2526     __ load_heap_oop(O0_from, O5_offset, G3_oop, noreg, AS_RAW);  // load the oop
2527     __ br_null_short(G3_oop, Assembler::pt, store_element);
2528 
2529     __ load_klass(G3_oop, G4_klass); // query the object klass
2530 
2531     generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
2532                         // branch to this on success:
2533                         store_element);
2534     // ======== end loop ========
2535 
2536     // It was a real error; we must depend on the caller to finish the job.
2537     // Register G1 has number of *remaining* oops, O2 number of *total* oops.
2538     // Emit GC store barriers for the oops we have copied (O2 minus G1),
2539     // and report their number to the caller.
2540     __ BIND(fail);
2541     __ subcc(O2_count, G1_remain, O2_count);
2542     __ brx(Assembler::zero, false, Assembler::pt, done);
2543     __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller
2544 
2545     __ BIND(do_epilogue);
2546     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count);
2547 
2548     __ BIND(done);
2549     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2550     __ retl();
    __ delayed()->nop();             // return value in O0
2552 
2553     return start;
2554   }
2555 
2556 
2557   //  Generate 'unsafe' array copy stub
2558   //  Though just as safe as the other stubs, it takes an unscaled
2559   //  size_t argument instead of an element count.
2560   //
2561   // Arguments for generated stub:
2562   //      from:  O0
2563   //      to:    O1
2564   //      count: O2 byte count, treated as ssize_t, can be zero
2565   //
2566   // Examines the alignment of the operands and dispatches
2567   // to a long, int, short, or byte copy loop.
2568   //
2569   address generate_unsafe_copy(const char* name,
2570                                address byte_copy_entry,
2571                                address short_copy_entry,
2572                                address int_copy_entry,
2573                                address long_copy_entry) {
2574 
2575     const Register O0_from   = O0;      // source array address
2576     const Register O1_to     = O1;      // destination array address
2577     const Register O2_count  = O2;      // elements count
2578 
2579     const Register G1_bits   = G1;      // test copy of low bits
2580 
2581     __ align(CodeEntryAlignment);
2582     StubCodeMark mark(this, "StubRoutines", name);
2583     address start = __ pc();
2584 
2585     // bump this on entry, not on exit:
2586     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
2587 
2588     __ or3(O0_from, O1_to, G1_bits);
2589     __ or3(O2_count,       G1_bits, G1_bits);
2590 
2591     __ btst(BytesPerLong-1, G1_bits);
2592     __ br(Assembler::zero, true, Assembler::pt,
2593           long_copy_entry, relocInfo::runtime_call_type);
2594     // scale the count on the way out:
2595     __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
2596 
2597     __ btst(BytesPerInt-1, G1_bits);
2598     __ br(Assembler::zero, true, Assembler::pt,
2599           int_copy_entry, relocInfo::runtime_call_type);
2600     // scale the count on the way out:
2601     __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
2602 
2603     __ btst(BytesPerShort-1, G1_bits);
2604     __ br(Assembler::zero, true, Assembler::pt,
2605           short_copy_entry, relocInfo::runtime_call_type);
2606     // scale the count on the way out:
2607     __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
2608 
2609     __ br(Assembler::always, false, Assembler::pt,
2610           byte_copy_entry, relocInfo::runtime_call_type);
2611     __ delayed()->nop();
2612 
2613     return start;
2614   }
2615 
2616 
2617   // Perform range checks on the proposed arraycopy.
2618   // Kills the two temps, but nothing else.
2619   // Also, clean the sign bits of src_pos and dst_pos.
2620   void arraycopy_range_checks(Register src,     // source array oop (O0)
2621                               Register src_pos, // source position (O1)
                              Register dst,     // destination array oop (O2)
2623                               Register dst_pos, // destination position (O3)
2624                               Register length,  // length of copy (O4)
2625                               Register temp1, Register temp2,
2626                               Label& L_failed) {
2627     BLOCK_COMMENT("arraycopy_range_checks:");
2628 
2629     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2630 
2631     const Register array_length = temp1;  // scratch
2632     const Register end_pos      = temp2;  // scratch
2633 
2634     // Note:  This next instruction may be in the delay slot of a branch:
2635     __ add(length, src_pos, end_pos);  // src_pos + length
2636     __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
2637     __ cmp(end_pos, array_length);
2638     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2639 
2640     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2641     __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
2642     __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
2643     __ cmp(end_pos, array_length);
2644     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2645 
    // Have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
    // A move with sign extension can be used since both values are positive.
2648     __ delayed()->signx(src_pos, src_pos);
2649     __ signx(dst_pos, dst_pos);
2650 
2651     BLOCK_COMMENT("arraycopy_range_checks done");
2652   }
2653 
2654 
2655   //
2656   //  Generate generic array copy stubs
2657   //
2658   //  Input:
2659   //    O0    -  src oop
2660   //    O1    -  src_pos
2661   //    O2    -  dst oop
2662   //    O3    -  dst_pos
2663   //    O4    -  element count
2664   //
2665   //  Output:
2666   //    O0 ==  0  -  success
2667   //    O0 == -1  -  need to call System.arraycopy
2668   //
2669   address generate_generic_copy(const char *name,
2670                                 address entry_jbyte_arraycopy,
2671                                 address entry_jshort_arraycopy,
2672                                 address entry_jint_arraycopy,
2673                                 address entry_oop_arraycopy,
2674                                 address entry_jlong_arraycopy,
2675                                 address entry_checkcast_arraycopy) {
2676     Label L_failed, L_objArray;
2677 
2678     // Input registers
2679     const Register src      = O0;  // source array oop
2680     const Register src_pos  = O1;  // source position
2681     const Register dst      = O2;  // destination array oop
2682     const Register dst_pos  = O3;  // destination position
2683     const Register length   = O4;  // elements count
2684 
2685     // registers used as temp
2686     const Register G3_src_klass = G3; // source array klass
2687     const Register G4_dst_klass = G4; // destination array klass
    const Register G5_lh        = G5; // layout helper
2689     const Register O5_temp      = O5;
2690 
2691     __ align(CodeEntryAlignment);
2692     StubCodeMark mark(this, "StubRoutines", name);
2693     address start = __ pc();
2694 
2695     // bump this on entry, not on exit:
2696     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
2697 
2698     // In principle, the int arguments could be dirty.
2699     //assert_clean_int(src_pos, G1);
2700     //assert_clean_int(dst_pos, G1);
2701     //assert_clean_int(length, G1);
2702 
2703     //-----------------------------------------------------------------------
2704     // Assembler stubs will be used for this call to arraycopy
2705     // if the following conditions are met:
2706     //
2707     // (1) src and dst must not be null.
2708     // (2) src_pos must not be negative.
2709     // (3) dst_pos must not be negative.
2710     // (4) length  must not be negative.
2711     // (5) src klass and dst klass should be the same and not NULL.
2712     // (6) src and dst should be arrays.
2713     // (7) src_pos + length must not exceed length of src.
2714     // (8) dst_pos + length must not exceed length of dst.
2715     BLOCK_COMMENT("arraycopy initial argument checks");
2716 
2717     //  if (src == NULL) return -1;
2718     __ br_null(src, false, Assembler::pn, L_failed);
2719 
2720     //  if (src_pos < 0) return -1;
2721     __ delayed()->tst(src_pos);
2722     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2723     __ delayed()->nop();
2724 
2725     //  if (dst == NULL) return -1;
2726     __ br_null(dst, false, Assembler::pn, L_failed);
2727 
2728     //  if (dst_pos < 0) return -1;
2729     __ delayed()->tst(dst_pos);
2730     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2731 
2732     //  if (length < 0) return -1;
2733     __ delayed()->tst(length);
2734     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2735 
2736     BLOCK_COMMENT("arraycopy argument klass checks");
2737     //  get src->klass()
2738     if (UseCompressedClassPointers) {
2739       __ delayed()->nop(); // ??? not good
2740       __ load_klass(src, G3_src_klass);
2741     } else {
2742       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
2743     }
2744 
2745 #ifdef ASSERT
2746     //  assert(src->klass() != NULL);
2747     BLOCK_COMMENT("assert klasses not null");
2748     { Label L_a, L_b;
2749       __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL
2750       __ bind(L_a);
2751       __ stop("broken null klass");
2752       __ bind(L_b);
2753       __ load_klass(dst, G4_dst_klass);
2754       __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
2755       __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
2756       BLOCK_COMMENT("assert done");
2757     }
2758 #endif
2759 
2760     // Load layout helper
2761     //
2762     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2763     // 32        30    24            16              8     2                 0
2764     //
2765     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2766     //
2767 
2768     int lh_offset = in_bytes(Klass::layout_helper_offset());
2769 
    // Load the 32-bit signed value. Use the br() instruction with it to check icc.
2771     __ lduw(G3_src_klass, lh_offset, G5_lh);
2772 
2773     if (UseCompressedClassPointers) {
2774       __ load_klass(dst, G4_dst_klass);
2775     }
2776     // Handle objArrays completely differently...
2777     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2778     __ set(objArray_lh, O5_temp);
2779     __ cmp(G5_lh,       O5_temp);
2780     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
2781     if (UseCompressedClassPointers) {
2782       __ delayed()->nop();
2783     } else {
2784       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
2785     }
2786 
2787     //  if (src->klass() != dst->klass()) return -1;
2788     __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed);
2789 
2790     //  if (!src->is_Array()) return -1;
2791     __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
2792     __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
2793 
2794     // At this point, it is known to be a typeArray (array_tag 0x3).
2795 #ifdef ASSERT
2796     __ delayed()->nop();
2797     { Label L;
2798       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2799       __ set(lh_prim_tag_in_place, O5_temp);
2800       __ cmp(G5_lh,                O5_temp);
2801       __ br(Assembler::greaterEqual, false, Assembler::pt, L);
2802       __ delayed()->nop();
2803       __ stop("must be a primitive array");
2804       __ bind(L);
2805     }
2806 #else
2807     __ delayed();                               // match next insn to prev branch
2808 #endif
2809 
2810     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2811                            O5_temp, G4_dst_klass, L_failed);
2812 
2813     // TypeArrayKlass
2814     //
2815     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2816     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2817     //
2818 
2819     const Register G4_offset = G4_dst_klass;    // array offset
2820     const Register G3_elsize = G3_src_klass;    // log2 element size
2821 
2822     __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
2823     __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
2824     __ add(src, G4_offset, src);       // src array offset
2825     __ add(dst, G4_offset, dst);       // dst array offset
2826     __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
2827 
    // The next registers should be set before the jump to the corresponding stub
2829     const Register from     = O0;  // source array address
2830     const Register to       = O1;  // destination array address
2831     const Register count    = O2;  // elements count
2832 
    // The 'from', 'to' and 'count' registers should be set in this order
    // since they occupy the same registers as 'src', 'src_pos' and 'dst'.
2835 
2836     BLOCK_COMMENT("scale indexes to element size");
2837     __ sll_ptr(src_pos, G3_elsize, src_pos);
2838     __ sll_ptr(dst_pos, G3_elsize, dst_pos);
2839     __ add(src, src_pos, from);       // src_addr
2840     __ add(dst, dst_pos, to);         // dst_addr
2841 
2842     BLOCK_COMMENT("choose copy loop based on element size");
2843     __ cmp(G3_elsize, 0);
2844     __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy);
2845     __ delayed()->signx(length, count); // length
2846 
2847     __ cmp(G3_elsize, LogBytesPerShort);
2848     __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy);
2849     __ delayed()->signx(length, count); // length
2850 
2851     __ cmp(G3_elsize, LogBytesPerInt);
2852     __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy);
2853     __ delayed()->signx(length, count); // length
2854 #ifdef ASSERT
2855     { Label L;
2856       __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L);
2857       __ stop("must be long copy, but elsize is wrong");
2858       __ bind(L);
2859     }
2860 #endif
2861     __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy);
2862     __ delayed()->signx(length, count); // length
2863 
2864     // ObjArrayKlass
2865   __ BIND(L_objArray);
2866     // live at this point:  G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
2867 
2868     Label L_plain_copy, L_checkcast_copy;
2869     //  test array classes for subtyping
2870     __ cmp(G3_src_klass, G4_dst_klass);         // usual case is exact equality
2871     __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
2872     __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
2873 
2874     // Identically typed arrays can be copied without element-wise checks.
2875     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2876                            O5_temp, G5_lh, L_failed);
2877 
2878     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2879     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2880     __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2881     __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2882     __ add(src, src_pos, from);       // src_addr
2883     __ add(dst, dst_pos, to);         // dst_addr
2884   __ BIND(L_plain_copy);
2885     __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy);
2886     __ delayed()->signx(length, count); // length
2887 
2888   __ BIND(L_checkcast_copy);
2889     // live at this point:  G3_src_klass, G4_dst_klass
2890     {
2891       // Before looking at dst.length, make sure dst is also an objArray.
2892       // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
2893       __ cmp(G5_lh,                    O5_temp);
2894       __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
2895 
2896       // It is safe to examine both src.length and dst.length.
2897       __ delayed();                             // match next insn to prev branch
2898       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2899                              O5_temp, G5_lh, L_failed);
2900 
2901       // Marshal the base address arguments now, freeing registers.
2902       __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2903       __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2904       __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2905       __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2906       __ add(src, src_pos, from);               // src_addr
2907       __ add(dst, dst_pos, to);                 // dst_addr
2908       __ signx(length, count);                  // length (reloaded)
2909 
2910       Register sco_temp = O3;                   // this register is free now
2911       assert_different_registers(from, to, count, sco_temp,
2912                                  G4_dst_klass, G3_src_klass);
2913 
2914       // Generate the type check.
2915       int sco_offset = in_bytes(Klass::super_check_offset_offset());
2916       __ lduw(G4_dst_klass, sco_offset, sco_temp);
2917       generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
2918                           O5_temp, L_plain_copy);
2919 
2920       // Fetch destination element klass from the ObjArrayKlass header.
2921       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2922 
2923       // the checkcast_copy loop needs two extra arguments:
2924       __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
2925       // lduw(O4, sco_offset, O3);              // sco of elem klass
2926 
2927       __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
2928       __ delayed()->lduw(O4, sco_offset, O3);
2929     }
2930 
2931   __ BIND(L_failed);
2932     __ retl();
2933     __ delayed()->sub(G0, 1, O0); // return -1
2934     return start;
2935   }
2936 
2937   //
2938   //  Generate stub for heap zeroing.
2939   //  "to" address is aligned to jlong (8 bytes).
2940   //
2941   // Arguments for generated stub:
2942   //      to:    O0
2943   //      count: O1 treated as signed (count of HeapWords)
2944   //             count could be 0
2945   //
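       //      Example (hypothetical count): on a 64-bit VM, count == 4 HeapWords is
       //      converted by the sllx below into 32 bytes before bis_zeroing is called.
       //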
2946   address generate_zero_aligned_words(const char* name) {
2947     __ align(CodeEntryAlignment);
2948     StubCodeMark mark(this, "StubRoutines", name);
2949     address start = __ pc();
2950 
2951     const Register to    = O0;   // start address of the memory to zero
2952     const Register count = O1;   // HeapWords count
2953     const Register temp  = O2;   // scratch
2954 
2955     Label Ldone;
2956     __ sllx(count, LogHeapWordSize, count); // to bytes count
2957     // Use BIS for zeroing
2958     __ bis_zeroing(to, count, temp, Ldone);
2959     __ bind(Ldone);
2960     __ retl();
2961     __ delayed()->nop();
2962     return start;
2963   }
2964 
2965   void generate_arraycopy_stubs() {
2966     address entry;
2967     address entry_jbyte_arraycopy;
2968     address entry_jshort_arraycopy;
2969     address entry_jint_arraycopy;
2970     address entry_oop_arraycopy;
2971     address entry_jlong_arraycopy;
2972     address entry_checkcast_arraycopy;
2973 
2974     address ucm_common_error_exit       =  generate_unsafecopy_common_error_exit();
2975     UnsafeCopyMemory::set_common_exit_stub_pc(ucm_common_error_exit);
2976 
2977     //*** jbyte
2978     // Always need aligned and unaligned versions
2979     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
2980                                                                                   "jbyte_disjoint_arraycopy");
2981     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
2982                                                                                   &entry_jbyte_arraycopy,
2983                                                                                   "jbyte_arraycopy");
2984     StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
2985                                                                                   "arrayof_jbyte_disjoint_arraycopy");
2986     StubRoutines::_arrayof_jbyte_arraycopy          = generate_conjoint_byte_copy(true, entry, NULL,
2987                                                                                   "arrayof_jbyte_arraycopy");
2988 
2989     //*** jshort
2990     // Always need aligned and unaligned versions
2991     StubRoutines::_jshort_disjoint_arraycopy         = generate_disjoint_short_copy(false, &entry,
2992                                                                                     "jshort_disjoint_arraycopy");
2993     StubRoutines::_jshort_arraycopy                  = generate_conjoint_short_copy(false, entry,
2994                                                                                     &entry_jshort_arraycopy,
2995                                                                                     "jshort_arraycopy");
2996     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
2997                                                                                     "arrayof_jshort_disjoint_arraycopy");
2998     StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
2999                                                                                     "arrayof_jshort_arraycopy");
3000 
3001     //*** jint
3002     // Aligned versions
3003     StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
3004                                                                                 "arrayof_jint_disjoint_arraycopy");
3005     StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
3006                                                                                 "arrayof_jint_arraycopy");
3007     // On 64-bit we need both aligned and unaligned versions of jint arraycopy.
3008     // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
3009     StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
3010                                                                                 "jint_disjoint_arraycopy");
3011     StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
3012                                                                                 &entry_jint_arraycopy,
3013                                                                                 "jint_arraycopy");
3014 
3015     //*** jlong
3016     // It is always aligned
3017     StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
3018                                                                                   "arrayof_jlong_disjoint_arraycopy");
3019     StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
3020                                                                                   "arrayof_jlong_arraycopy");
3021     StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
3022     StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;
3023 
3024 
3025     //*** oops
3026     // Aligned versions
3027     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
3028                                                                                       "arrayof_oop_disjoint_arraycopy");
3029     StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
3030                                                                                       "arrayof_oop_arraycopy");
3031     // Aligned versions without pre-barriers
3032     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry,
3033                                                                                       "arrayof_oop_disjoint_arraycopy_uninit",
3034                                                                                       /*dest_uninitialized*/true);
3035     StubRoutines::_arrayof_oop_arraycopy_uninit          = generate_conjoint_oop_copy(true, entry, NULL,
3036                                                                                       "arrayof_oop_arraycopy_uninit",
3037                                                                                       /*dest_uninitialized*/true);
3038     if (UseCompressedOops) {
3039       // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
3040       StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
3041                                                                                     "oop_disjoint_arraycopy");
3042       StubRoutines::_oop_arraycopy                     = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
3043                                                                                     "oop_arraycopy");
3044       // Unaligned versions without pre-barriers
3045       StubRoutines::_oop_disjoint_arraycopy_uninit     = generate_disjoint_oop_copy(false, &entry,
3046                                                                                     "oop_disjoint_arraycopy_uninit",
3047                                                                                     /*dest_uninitialized*/true);
3048       StubRoutines::_oop_arraycopy_uninit              = generate_conjoint_oop_copy(false, entry, NULL,
3049                                                                                     "oop_arraycopy_uninit",
3050                                                                                     /*dest_uninitialized*/true);
3051     } else {
3052       // oop arraycopy is always aligned on 32bit and 64bit without compressed oops
3053       StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
3054       StubRoutines::_oop_arraycopy                     = StubRoutines::_arrayof_oop_arraycopy;
3055       StubRoutines::_oop_disjoint_arraycopy_uninit     = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit;
3056       StubRoutines::_oop_arraycopy_uninit              = StubRoutines::_arrayof_oop_arraycopy_uninit;
3057     }
3058 
3059     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
3060     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
3061                                                                         /*dest_uninitialized*/true);
3062 
3063     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
3064                                                               entry_jbyte_arraycopy,
3065                                                               entry_jshort_arraycopy,
3066                                                               entry_jint_arraycopy,
3067                                                               entry_jlong_arraycopy);
3068     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
3069                                                                entry_jbyte_arraycopy,
3070                                                                entry_jshort_arraycopy,
3071                                                                entry_jint_arraycopy,
3072                                                                entry_oop_arraycopy,
3073                                                                entry_jlong_arraycopy,
3074                                                                entry_checkcast_arraycopy);
3075 
3076     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
3077     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
3078     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
3079     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
3080     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3081     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
3082 
3083     if (UseBlockZeroing) {
3084       StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words");
3085     }
3086   }
3087 
3088   address generate_aescrypt_encryptBlock() {
3089     // Required since we read the expanded key 'int' array starting at its first element without further alignment considerations
3090     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
3091            "the following code assumes that first element of an int array is aligned to 8 bytes");
3092     __ align(CodeEntryAlignment);
3093     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
3094     Label L_load_misaligned_input, L_load_expanded_key, L_doLast128bit, L_storeOutput, L_store_misaligned_output;
3095     address start = __ pc();
3096     Register from = O0; // source byte array
3097     Register to = O1;   // destination byte array
3098     Register key = O2;  // expanded key array
3099     const Register keylen = O4; // reg for storing expanded key array length
3100 
3101     // read expanded key length
3102     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
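         // (For reference: the SunJCE expanded key holds 4*(rounds+1) ints, so keylen is
         //  44, 52 or 60 for 128-, 192- and 256-bit keys; the checks against 44 and 52
         //  below rely on this.)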
3103 
3104     // Method to handle arbitrary alignment for load instructions:
3105     // Check the last 3 bits of the 'from' address to see if it is 8-byte aligned.
3106     // If zero/aligned, continue with double FP load instructions.
3107     // If non-zero/misaligned, alignaddr sets GSR.align to the number of bytes to skip during faligndata;
3108     // it also rounds the arbitrarily aligned 'from' address down to the nearest 8-byte aligned address.
3109     // Load 3 * 8 bytes (covering the 16-byte input) into 3 FP regs starting at this aligned address.
3110     // faligndata then extracts (based on GSR.align) the appropriate 8 bytes from each pair of source regs.
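         // (Hypothetical example: if 'from' ends in ...0x3, alignaddr rounds it down to ...0x0
         //  and sets GSR.align to 3; the two faligndata ops then assemble the 16 input bytes
         //  at offsets 3..18 of the aligned region from the three 8-byte loads.)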
3111 
3112     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3113     __ andcc(from, 7, G0);
3114     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
3115     __ delayed()->alignaddr(from, G0, from);
3116 
3117     // aligned case: load input into F54-F56
3118     __ ldf(FloatRegisterImpl::D, from, 0, F54);
3119     __ ldf(FloatRegisterImpl::D, from, 8, F56);
3120     __ ba_short(L_load_expanded_key);
3121 
3122     __ BIND(L_load_misaligned_input);
3123     __ ldf(FloatRegisterImpl::D, from, 0, F54);
3124     __ ldf(FloatRegisterImpl::D, from, 8, F56);
3125     __ ldf(FloatRegisterImpl::D, from, 16, F58);
3126     __ faligndata(F54, F56, F54);
3127     __ faligndata(F56, F58, F56);
3128 
3129     __ BIND(L_load_expanded_key);
3130     // Since we load the expanded key buffer starting at its first element, 8-byte alignment is guaranteed
3131     for ( int i = 0;  i <= 38; i += 2 ) {
3132       __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i));
3133     }
3134 
3135     // perform cipher transformation
3136     __ fxor(FloatRegisterImpl::D, F0, F54, F54);
3137     __ fxor(FloatRegisterImpl::D, F2, F56, F56);
3138     // rounds 1 through 8
3139     for ( int i = 4;  i <= 28; i += 8 ) {
3140       __ aes_eround01(as_FloatRegister(i), F54, F56, F58);
3141       __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60);
3142       __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54);
3143       __ aes_eround23(as_FloatRegister(i+6), F58, F60, F56);
3144     }
3145     __ aes_eround01(F36, F54, F56, F58); //round 9
3146     __ aes_eround23(F38, F54, F56, F60);
3147 
3148     // 128-bit original key size
3149     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit);
3150 
3151     for ( int i = 40;  i <= 50; i += 2 ) {
3152       __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i) );
3153     }
3154     __ aes_eround01(F40, F58, F60, F54); //round 10
3155     __ aes_eround23(F42, F58, F60, F56);
3156     __ aes_eround01(F44, F54, F56, F58); //round 11
3157     __ aes_eround23(F46, F54, F56, F60);
3158 
3159     // 192-bit original key size
3160     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput);
3161 
3162     __ ldf(FloatRegisterImpl::D, key, 208, F52);
3163     __ aes_eround01(F48, F58, F60, F54); //round 12
3164     __ aes_eround23(F50, F58, F60, F56);
3165     __ ldf(FloatRegisterImpl::D, key, 216, F46);
3166     __ ldf(FloatRegisterImpl::D, key, 224, F48);
3167     __ ldf(FloatRegisterImpl::D, key, 232, F50);
3168     __ aes_eround01(F52, F54, F56, F58); //round 13
3169     __ aes_eround23(F46, F54, F56, F60);
3170     __ ba_short(L_storeOutput);
3171 
3172     __ BIND(L_doLast128bit);
3173     __ ldf(FloatRegisterImpl::D, key, 160, F48);
3174     __ ldf(FloatRegisterImpl::D, key, 168, F50);
3175 
3176     __ BIND(L_storeOutput);
3177     // perform last round of encryption common for all key sizes
3178     __ aes_eround01_l(F48, F58, F60, F54); //last round
3179     __ aes_eround23_l(F50, F58, F60, F56);
3180 
3181     // Method to address arbitrary alignment for store instructions:
3182     // Check last 3 bits of 'dest' address to see if it is aligned to 8-byte boundary
3183     // If zero/aligned then continue with double FP store instructions
3184     // If not zero/mis-aligned then edge8n will generate edge mask in result reg (O3 in below case)
3185     // Example: If dest address is 0x07 and nearest 8-byte aligned address is 0x00 then edge mask will be 00000001
3186     // Compute (8-n), where n is the number of bytes skipped by the partial store (stpartialf) for that edge mask; n=7 in this case
3187     // The value of n comes from the andcc that checks 'dest' alignment; it is available in O5 in the code below
3188     // Set GSR.align to (8-n) using alignaddr
3189     // Circularly byte-shift the store values by n places so that the original bytes are in the correct positions for stpartialf
3190     // Round the arbitrarily aligned 'dest' address down to the nearest 8-byte aligned address
3191     // Partially store the original first (8-n) bytes starting at the original 'dest' address
3192     // Negate the edge mask so that the subsequent stpartialf can store the remaining n bytes at the appropriate addresses
3193     // We need to execute this process for both the 8-byte result values
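         // (Hypothetical trace: with 'to' == 0x07, n == 7, so GSR.align is set to 1 and each
         //  result register is circularly shifted by one byte; the partial stores then place
         //  byte 0 of the first result at 0x07 and, once the mask is negated, bytes 1..7 at
         //  0x08..0x0e; the second 8-byte result is handled the same way one word higher.)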
3194 
3195     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3196     __ andcc(to, 7, O5);
3197     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
3198     __ delayed()->edge8n(to, G0, O3);
3199 
3200     // aligned case: store output into the destination array
3201     __ stf(FloatRegisterImpl::D, F54, to, 0);
3202     __ retl();
3203     __ delayed()->stf(FloatRegisterImpl::D, F56, to, 8);
3204 
3205     __ BIND(L_store_misaligned_output);
3206     __ add(to, 8, O4);
3207     __ mov(8, O2);
3208     __ sub(O2, O5, O2);
3209     __ alignaddr(O2, G0, O2);
3210     __ faligndata(F54, F54, F54);
3211     __ faligndata(F56, F56, F56);
3212     __ and3(to, -8, to);
3213     __ and3(O4, -8, O4);
3214     __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
3215     __ stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
3216     __ add(to, 8, to);
3217     __ add(O4, 8, O4);
3218     __ orn(G0, O3, O3);
3219     __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
3220     __ retl();
3221     __ delayed()->stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
3222 
3223     return start;
3224   }
3225 
3226   address generate_aescrypt_decryptBlock() {
3227     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
3228            "the following code assumes that first element of an int array is aligned to 8 bytes");
3229     // required since we read original key 'byte' array as well in the decryption stubs
3230     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
3231            "the following code assumes that first element of a byte array is aligned to 8 bytes");
3232     __ align(CodeEntryAlignment);
3233     StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
3234     address start = __ pc();
3235     Label L_load_misaligned_input, L_load_original_key, L_expand192bit, L_expand256bit, L_reload_misaligned_input;
3236     Label L_256bit_transform, L_common_transform, L_store_misaligned_output;
3237     Register from = O0; // source byte array
3238     Register to = O1;   // destination byte array
3239     Register key = O2;  // expanded key array
3240     Register original_key = O3;  // original key array only required during decryption
3241     const Register keylen = O4;  // reg for storing expanded key array length
3242 
3243     // read expanded key array length
3244     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
3245 
3246     // save 'from' since we may need to recheck alignment in case of 256-bit decryption
3247     __ mov(from, G1);
3248 
3249     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3250     __ andcc(from, 7, G0);
3251     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
3252     __ delayed()->alignaddr(from, G0, from);
3253 
3254     // aligned case: load input into F52-F54
3255     __ ldf(FloatRegisterImpl::D, from, 0, F52);
3256     __ ldf(FloatRegisterImpl::D, from, 8, F54);
3257     __ ba_short(L_load_original_key);
3258 
3259     __ BIND(L_load_misaligned_input);
3260     __ ldf(FloatRegisterImpl::D, from, 0, F52);
3261     __ ldf(FloatRegisterImpl::D, from, 8, F54);
3262     __ ldf(FloatRegisterImpl::D, from, 16, F56);
3263     __ faligndata(F52, F54, F52);
3264     __ faligndata(F54, F56, F54);
3265 
3266     __ BIND(L_load_original_key);
3267     // load original key from SunJCE expanded decryption key
3268     // Since we load the original key buffer starting at its first element, 8-byte alignment is guaranteed
3269     for ( int i = 0;  i <= 3; i++ ) {
3270       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3271     }
3272 
3273     // 256-bit original key size
3274     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
3275 
3276     // 192-bit original key size
3277     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
3278 
3279     // 128-bit original key size
3280     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3281     for ( int i = 0;  i <= 36; i += 4 ) {
3282       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
3283       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
3284     }
3285 
3286     // perform 128-bit key specific inverse cipher transformation
3287     __ fxor(FloatRegisterImpl::D, F42, F54, F54);
3288     __ fxor(FloatRegisterImpl::D, F40, F52, F52);
3289     __ ba_short(L_common_transform);
3290 
3291     __ BIND(L_expand192bit);
3292 
3293     // start loading rest of the 192-bit key
3294     __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
3295     __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
3296 
3297     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3298     for ( int i = 0;  i <= 36; i += 6 ) {
3299       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
3300       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
3301       __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
3302     }
3303     __ aes_kexpand1(F42, F46, 7, F48);
3304     __ aes_kexpand2(F44, F48, F50);
3305 
3306     // perform 192-bit key specific inverse cipher transformation
3307     __ fxor(FloatRegisterImpl::D, F50, F54, F54);
3308     __ fxor(FloatRegisterImpl::D, F48, F52, F52);
3309     __ aes_dround23(F46, F52, F54, F58);
3310     __ aes_dround01(F44, F52, F54, F56);
3311     __ aes_dround23(F42, F56, F58, F54);
3312     __ aes_dround01(F40, F56, F58, F52);
3313     __ ba_short(L_common_transform);
3314 
3315     __ BIND(L_expand256bit);
3316 
3317     // load rest of the 256-bit key
3318     for ( int i = 4;  i <= 7; i++ ) {
3319       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3320     }
3321 
3322     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3323     for ( int i = 0;  i <= 40; i += 8 ) {
3324       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
3325       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
3326       __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
3327       __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
3328     }
3329     __ aes_kexpand1(F48, F54, 6, F56);
3330     __ aes_kexpand2(F50, F56, F58);
3331 
3332     for ( int i = 0;  i <= 6; i += 2 ) {
3333       __ fsrc2(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i));
3334     }
3335 
3336     // reload original 'from' address
3337     __ mov(G1, from);
3338 
3339     // re-check 8-byte alignment
3340     __ andcc(from, 7, G0);
3341     __ br(Assembler::notZero, true, Assembler::pn, L_reload_misaligned_input);
3342     __ delayed()->alignaddr(from, G0, from);
3343 
3344     // aligned case: load input into F52-F54
3345     __ ldf(FloatRegisterImpl::D, from, 0, F52);
3346     __ ldf(FloatRegisterImpl::D, from, 8, F54);
3347     __ ba_short(L_256bit_transform);
3348 
3349     __ BIND(L_reload_misaligned_input);
3350     __ ldf(FloatRegisterImpl::D, from, 0, F52);
3351     __ ldf(FloatRegisterImpl::D, from, 8, F54);
3352     __ ldf(FloatRegisterImpl::D, from, 16, F56);
3353     __ faligndata(F52, F54, F52);
3354     __ faligndata(F54, F56, F54);
3355 
3356     // perform 256-bit key specific inverse cipher transformation
3357     __ BIND(L_256bit_transform);
3358     __ fxor(FloatRegisterImpl::D, F0, F54, F54);
3359     __ fxor(FloatRegisterImpl::D, F2, F52, F52);
3360     __ aes_dround23(F4, F52, F54, F58);
3361     __ aes_dround01(F6, F52, F54, F56);
3362     __ aes_dround23(F50, F56, F58, F54);
3363     __ aes_dround01(F48, F56, F58, F52);
3364     __ aes_dround23(F46, F52, F54, F58);
3365     __ aes_dround01(F44, F52, F54, F56);
3366     __ aes_dround23(F42, F56, F58, F54);
3367     __ aes_dround01(F40, F56, F58, F52);
3368 
3369     for ( int i = 0;  i <= 7; i++ ) {
3370       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3371     }
3372 
3373     // perform inverse cipher transformations common for all key sizes
3374     __ BIND(L_common_transform);
3375     for ( int i = 38;  i >= 6; i -= 8 ) {
3376       __ aes_dround23(as_FloatRegister(i), F52, F54, F58);
3377       __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56);
3378       if ( i != 6) {
3379         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54);
3380         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52);
3381       } else {
3382         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54);
3383         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52);
3384       }
3385     }
3386 
3387     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3388     __ andcc(to, 7, O5);
3389     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
3390     __ delayed()->edge8n(to, G0, O3);
3391 
3392     // aligned case: store output into the destination array
3393     __ stf(FloatRegisterImpl::D, F52, to, 0);
3394     __ retl();
3395     __ delayed()->stf(FloatRegisterImpl::D, F54, to, 8);
3396 
3397     __ BIND(L_store_misaligned_output);
3398     __ add(to, 8, O4);
3399     __ mov(8, O2);
3400     __ sub(O2, O5, O2);
3401     __ alignaddr(O2, G0, O2);
3402     __ faligndata(F52, F52, F52);
3403     __ faligndata(F54, F54, F54);
3404     __ and3(to, -8, to);
3405     __ and3(O4, -8, O4);
3406     __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY);
3407     __ stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY);
3408     __ add(to, 8, to);
3409     __ add(O4, 8, O4);
3410     __ orn(G0, O3, O3);
3411     __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY);
3412     __ retl();
3413     __ delayed()->stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY);
3414 
3415     return start;
3416   }
3417 
3418   address generate_cipherBlockChaining_encryptAESCrypt() {
3419     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
3420            "the following code assumes that first element of an int array is aligned to 8 bytes");
3421     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
3422            "the following code assumes that first element of a byte array is aligned to 8 bytes");
3423     __ align(CodeEntryAlignment);
3424     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
3425     Label L_cbcenc128, L_load_misaligned_input_128bit, L_128bit_transform, L_store_misaligned_output_128bit;
3426     Label L_check_loop_end_128bit, L_cbcenc192, L_load_misaligned_input_192bit, L_192bit_transform;
3427     Label L_store_misaligned_output_192bit, L_check_loop_end_192bit, L_cbcenc256, L_load_misaligned_input_256bit;
3428     Label L_256bit_transform, L_store_misaligned_output_256bit, L_check_loop_end_256bit;
3429     address start = __ pc();
3430     Register from = I0; // source byte array
3431     Register to = I1;   // destination byte array
3432     Register key = I2;  // expanded key array
3433     Register rvec = I3; // init vector
3434     const Register len_reg = I4; // cipher length
3435     const Register keylen = I5;  // reg for storing expanded key array length
3436 
3437     __ save_frame(0);
3438     // save cipher len to return at the end
3439     __ mov(len_reg, L0);
3440 
3441     // read expanded key length
3442     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
3443 
3444     // load initial vector, 8-byte alignment is guaranteed
3445     __ ldf(FloatRegisterImpl::D, rvec, 0, F60);
3446     __ ldf(FloatRegisterImpl::D, rvec, 8, F62);
3447     // load key, 8-byte alignment is guaranteed
3448     __ ldx(key,0,G1);
3449     __ ldx(key,8,G5);
3450 
3451     // start loading expanded key, 8-byte alignment is guaranteed
3452     for ( int i = 0, j = 16;  i <= 38; i += 2, j += 8 ) {
3453       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
3454     }
3455 
3456     // 128-bit original key size
3457     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128);
3458 
3459     for ( int i = 40, j = 176;  i <= 46; i += 2, j += 8 ) {
3460       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
3461     }
3462 
3463     // 192-bit original key size
3464     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192);
3465 
3466     for ( int i = 48, j = 208;  i <= 54; i += 2, j += 8 ) {
3467       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
3468     }
3469 
3470     // 256-bit original key size
3471     __ ba_short(L_cbcenc256);
3472 
3473     __ align(OptoLoopAlignment);
3474     __ BIND(L_cbcenc128);
3475     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3476     __ andcc(from, 7, G0);
3477     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_128bit);
3478     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
3479 
3480     // aligned case: load input into G3 and G4
3481     __ ldx(from,0,G3);
3482     __ ldx(from,8,G4);
3483     __ ba_short(L_128bit_transform);
3484 
3485     __ BIND(L_load_misaligned_input_128bit);
3486     // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption
3487     __ alignaddr(from, G0, from);
3488     __ ldf(FloatRegisterImpl::D, from, 0, F48);
3489     __ ldf(FloatRegisterImpl::D, from, 8, F50);
3490     __ ldf(FloatRegisterImpl::D, from, 16, F52);
3491     __ faligndata(F48, F50, F48);
3492     __ faligndata(F50, F52, F50);
3493     __ movdtox(F48, G3);
3494     __ movdtox(F50, G4);
3495     __ mov(L1, from);
3496 
3497     __ BIND(L_128bit_transform);
3498     __ xor3(G1,G3,G3);
3499     __ xor3(G5,G4,G4);
3500     __ movxtod(G3,F56);
3501     __ movxtod(G4,F58);
3502     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
3503     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
3504 
3505     // TEN_EROUNDS
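         // (A 128-bit key uses 10 AES rounds; each iteration of the loop below applies two
         //  rounds, and the final iteration uses the *_l forms for the last round.)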
3506     for ( int i = 0;  i <= 32; i += 8 ) {
3507       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
3508       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
3509       if (i != 32 ) {
3510         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
3511         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
3512       } else {
3513         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
3514         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
3515       }
3516     }
3517 
3518     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3519     __ andcc(to, 7, L1);
3520     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_128bit);
3521     __ delayed()->edge8n(to, G0, L2);
3522 
3523     // aligned case: store output into the destination array
3524     __ stf(FloatRegisterImpl::D, F60, to, 0);
3525     __ stf(FloatRegisterImpl::D, F62, to, 8);
3526     __ ba_short(L_check_loop_end_128bit);
3527 
3528     __ BIND(L_store_misaligned_output_128bit);
3529     __ add(to, 8, L3);
3530     __ mov(8, L4);
3531     __ sub(L4, L1, L4);
3532     __ alignaddr(L4, G0, L4);
3533     // save cipher text before circular right shift
3534     // as it needs to be stored as iv for next block (see code before next retl)
3535     __ movdtox(F60, L6);
3536     __ movdtox(F62, L7);
3537     __ faligndata(F60, F60, F60);
3538     __ faligndata(F62, F62, F62);
3539     __ mov(to, L5);
3540     __ and3(to, -8, to);
3541     __ and3(L3, -8, L3);
3542     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3543     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3544     __ add(to, 8, to);
3545     __ add(L3, 8, L3);
3546     __ orn(G0, L2, L2);
3547     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3548     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3549     __ mov(L5, to);
3550     __ movxtod(L6, F60);
3551     __ movxtod(L7, F62);
3552 
3553     __ BIND(L_check_loop_end_128bit);
3554     __ add(from, 16, from);
3555     __ add(to, 16, to);
3556     __ subcc(len_reg, 16, len_reg);
3557     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128);
3558     __ delayed()->nop();
3559     // re-init initial vector for next block, 8-byte alignment is guaranteed
3560     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
3561     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
3562     __ mov(L0, I0);
3563     __ ret();
3564     __ delayed()->restore();
3565 
3566     __ align(OptoLoopAlignment);
3567     __ BIND(L_cbcenc192);
3568     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3569     __ andcc(from, 7, G0);
3570     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_192bit);
3571     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
3572 
3573     // aligned case: load input into G3 and G4
3574     __ ldx(from,0,G3);
3575     __ ldx(from,8,G4);
3576     __ ba_short(L_192bit_transform);
3577 
3578     __ BIND(L_load_misaligned_input_192bit);
3579     // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption
3580     __ alignaddr(from, G0, from);
3581     __ ldf(FloatRegisterImpl::D, from, 0, F48);
3582     __ ldf(FloatRegisterImpl::D, from, 8, F50);
3583     __ ldf(FloatRegisterImpl::D, from, 16, F52);
3584     __ faligndata(F48, F50, F48);
3585     __ faligndata(F50, F52, F50);
3586     __ movdtox(F48, G3);
3587     __ movdtox(F50, G4);
3588     __ mov(L1, from);
3589 
3590     __ BIND(L_192bit_transform);
3591     __ xor3(G1,G3,G3);
3592     __ xor3(G5,G4,G4);
3593     __ movxtod(G3,F56);
3594     __ movxtod(G4,F58);
3595     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
3596     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
3597 
3598     // TWELVE_EROUNDS
3599     for ( int i = 0;  i <= 40; i += 8 ) {
3600       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
3601       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
3602       if (i != 40 ) {
3603         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
3604         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
3605       } else {
3606         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
3607         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
3608       }
3609     }
3610 
3611     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3612     __ andcc(to, 7, L1);
3613     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_192bit);
3614     __ delayed()->edge8n(to, G0, L2);
3615 
3616     // aligned case: store output into the destination array
3617     __ stf(FloatRegisterImpl::D, F60, to, 0);
3618     __ stf(FloatRegisterImpl::D, F62, to, 8);
3619     __ ba_short(L_check_loop_end_192bit);
3620 
3621     __ BIND(L_store_misaligned_output_192bit);
3622     __ add(to, 8, L3);
3623     __ mov(8, L4);
3624     __ sub(L4, L1, L4);
3625     __ alignaddr(L4, G0, L4);
3626     __ movdtox(F60, L6);
3627     __ movdtox(F62, L7);
3628     __ faligndata(F60, F60, F60);
3629     __ faligndata(F62, F62, F62);
3630     __ mov(to, L5);
3631     __ and3(to, -8, to);
3632     __ and3(L3, -8, L3);
3633     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3634     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3635     __ add(to, 8, to);
3636     __ add(L3, 8, L3);
3637     __ orn(G0, L2, L2);
3638     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3639     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3640     __ mov(L5, to);
3641     __ movxtod(L6, F60);
3642     __ movxtod(L7, F62);
3643 
3644     __ BIND(L_check_loop_end_192bit);
3645     __ add(from, 16, from);
3646     __ subcc(len_reg, 16, len_reg);
3647     __ add(to, 16, to);
3648     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192);
3649     __ delayed()->nop();
3650     // re-init initial vector for next block, 8-byte alignment is guaranteed
3651     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
3652     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
3653     __ mov(L0, I0);
3654     __ ret();
3655     __ delayed()->restore();
3656 
3657     __ align(OptoLoopAlignment);
3658     __ BIND(L_cbcenc256);
3659     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3660     __ andcc(from, 7, G0);
3661     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_256bit);
3662     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
3663 
3664     // aligned case: load input into G3 and G4
3665     __ ldx(from,0,G3);
3666     __ ldx(from,8,G4);
3667     __ ba_short(L_256bit_transform);
3668 
3669     __ BIND(L_load_misaligned_input_256bit);
3670     // cannot clobber F48, F50 and F52. F56, F58 can be used though
3671     __ alignaddr(from, G0, from);
3672     __ movdtox(F60, L2); // save F60 before overwriting
3673     __ ldf(FloatRegisterImpl::D, from, 0, F56);
3674     __ ldf(FloatRegisterImpl::D, from, 8, F58);
3675     __ ldf(FloatRegisterImpl::D, from, 16, F60);
3676     __ faligndata(F56, F58, F56);
3677     __ faligndata(F58, F60, F58);
3678     __ movdtox(F56, G3);
3679     __ movdtox(F58, G4);
3680     __ mov(L1, from);
3681     __ movxtod(L2, F60);
3682 
3683     __ BIND(L_256bit_transform);
3684     __ xor3(G1,G3,G3);
3685     __ xor3(G5,G4,G4);
3686     __ movxtod(G3,F56);
3687     __ movxtod(G4,F58);
3688     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
3689     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
3690 
3691     // FOURTEEN_EROUNDS
3692     for ( int i = 0;  i <= 48; i += 8 ) {
3693       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
3694       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
3695       if (i != 48 ) {
3696         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
3697         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
3698       } else {
3699         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
3700         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
3701       }
3702     }
3703 
3704     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3705     __ andcc(to, 7, L1);
3706     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_256bit);
3707     __ delayed()->edge8n(to, G0, L2);
3708 
3709     // aligned case: store output into the destination array
3710     __ stf(FloatRegisterImpl::D, F60, to, 0);
3711     __ stf(FloatRegisterImpl::D, F62, to, 8);
3712     __ ba_short(L_check_loop_end_256bit);
3713 
3714     __ BIND(L_store_misaligned_output_256bit);
3715     __ add(to, 8, L3);
3716     __ mov(8, L4);
3717     __ sub(L4, L1, L4);
3718     __ alignaddr(L4, G0, L4);
3719     __ movdtox(F60, L6);
3720     __ movdtox(F62, L7);
3721     __ faligndata(F60, F60, F60);
3722     __ faligndata(F62, F62, F62);
3723     __ mov(to, L5);
3724     __ and3(to, -8, to);
3725     __ and3(L3, -8, L3);
3726     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3727     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3728     __ add(to, 8, to);
3729     __ add(L3, 8, L3);
3730     __ orn(G0, L2, L2);
3731     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3732     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3733     __ mov(L5, to);
3734     __ movxtod(L6, F60);
3735     __ movxtod(L7, F62);
3736 
3737     __ BIND(L_check_loop_end_256bit);
3738     __ add(from, 16, from);
3739     __ subcc(len_reg, 16, len_reg);
3740     __ add(to, 16, to);
3741     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256);
3742     __ delayed()->nop();
3743     // re-init initial vector for next block, 8-byte alignment is guaranteed
3744     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
3745     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
3746     __ mov(L0, I0);
3747     __ ret();
3748     __ delayed()->restore();
3749 
3750     return start;
3751   }
3752 
3753   address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
3754     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
3755            "the following code assumes that first element of an int array is aligned to 8 bytes");
3756     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
3757            "the following code assumes that first element of a byte array is aligned to 8 bytes");
3758     __ align(CodeEntryAlignment);
3759     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
3760     Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start;
3761     Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256;
3762     Label L_load_misaligned_input_first_block, L_transform_first_block, L_load_misaligned_next2_blocks128, L_transform_next2_blocks128;
3763     Label L_load_misaligned_next2_blocks192, L_transform_next2_blocks192, L_load_misaligned_next2_blocks256, L_transform_next2_blocks256;
3764     Label L_store_misaligned_output_first_block, L_check_decrypt_end, L_store_misaligned_output_next2_blocks128;
3765     Label L_check_decrypt_loop_end128, L_store_misaligned_output_next2_blocks192, L_check_decrypt_loop_end192;
3766     Label L_store_misaligned_output_next2_blocks256, L_check_decrypt_loop_end256;
3767     address start = __ pc();
3768     Register from = I0; // source byte array
3769     Register to = I1;   // destination byte array
3770     Register key = I2;  // expanded key array
3771     Register rvec = I3; // init vector
3772     const Register len_reg = I4; // cipher length
3773     const Register original_key = I5;  // original key array only required during decryption
3774     const Register keylen = L6;  // reg for storing expanded key array length
3775 
3776     __ save_frame(0); // args are read from the I* registers since we save the frame at the beginning
3777     // save cipher len to return at the end
3778     __ mov(len_reg, L7);
3779 
3780     // load original key from SunJCE expanded decryption key
3781     // Since we load the original key buffer starting at its first element, 8-byte alignment is guaranteed
3782     for ( int i = 0;  i <= 3; i++ ) {
3783       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3784     }
3785 
3786     // load initial vector, 8-byte alignment is guaranteed
3787     __ ldx(rvec,0,L0);
3788     __ ldx(rvec,8,L1);
3789 
3790     // read expanded key array length
3791     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
3792 
3793     // 256-bit original key size
3794     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
3795 
3796     // 192-bit original key size
3797     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
3798 
3799     // 128-bit original key size
3800     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3801     for ( int i = 0;  i <= 36; i += 4 ) {
3802       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
3803       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
3804     }
3805 
3806     // load expanded key[last-1] and key[last] elements
3807     __ movdtox(F40,L2);
3808     __ movdtox(F42,L3);
3809 
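         // If the length is an even number of 16-byte blocks ((len & 16) == 0), go straight
         // to the two-block loop; otherwise decrypt one block first so that an even number
         // of blocks remains.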
3810     __ and3(len_reg, 16, L4);
3811     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks128);
3812     __ nop();
3813 
3814     __ ba_short(L_dec_first_block_start);
3815 
3816     __ BIND(L_expand192bit);
3817     // load rest of the 192-bit key
3818     __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
3819     __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
3820 
3821     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3822     for ( int i = 0;  i <= 36; i += 6 ) {
3823       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
3824       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
3825       __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
3826     }
3827     __ aes_kexpand1(F42, F46, 7, F48);
3828     __ aes_kexpand2(F44, F48, F50);
3829 
3830     // load expanded key[last-1] and key[last] elements
3831     __ movdtox(F48,L2);
3832     __ movdtox(F50,L3);
3833 
3834     __ and3(len_reg, 16, L4);
3835     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks192);
3836     __ nop();
3837 
3838     __ ba_short(L_dec_first_block_start);
3839 
3840     __ BIND(L_expand256bit);
3841     // load rest of the 256-bit key
3842     for ( int i = 4;  i <= 7; i++ ) {
3843       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3844     }
3845 
3846     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3847     for ( int i = 0;  i <= 40; i += 8 ) {
3848       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
3849       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
3850       __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
3851       __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
3852     }
3853     __ aes_kexpand1(F48, F54, 6, F56);
3854     __ aes_kexpand2(F50, F56, F58);
3855 
3856     // load expanded key[last-1] and key[last] elements
3857     __ movdtox(F56,L2);
3858     __ movdtox(F58,L3);
3859 
3860     __ and3(len_reg, 16, L4);
3861     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks256);
3862 
3863     __ BIND(L_dec_first_block_start);
3864     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3865     __ andcc(from, 7, G0);
3866     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_first_block);
3867     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
3868 
3869     // aligned case: load input into L4 and L5
3870     __ ldx(from,0,L4);
3871     __ ldx(from,8,L5);
3872     __ ba_short(L_transform_first_block);
3873 
3874     __ BIND(L_load_misaligned_input_first_block);
3875     __ alignaddr(from, G0, from);
3876     // F58, F60, F62 can be clobbered
3877     __ ldf(FloatRegisterImpl::D, from, 0, F58);
3878     __ ldf(FloatRegisterImpl::D, from, 8, F60);
3879     __ ldf(FloatRegisterImpl::D, from, 16, F62);
3880     __ faligndata(F58, F60, F58);
3881     __ faligndata(F60, F62, F60);
3882     __ movdtox(F58, L4);
3883     __ movdtox(F60, L5);
3884     __ mov(G1, from);
3885 
3886     __ BIND(L_transform_first_block);
3887     __ xor3(L2,L4,G1);
3888     __ movxtod(G1,F60);
3889     __ xor3(L3,L5,G1);
3890     __ movxtod(G1,F62);
3891 
3892     // 128-bit original key size
3893     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128);
3894 
3895     // 192-bit original key size
3896     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192);
3897 
3898     __ aes_dround23(F54, F60, F62, F58);
3899     __ aes_dround01(F52, F60, F62, F56);
3900     __ aes_dround23(F50, F56, F58, F62);
3901     __ aes_dround01(F48, F56, F58, F60);
3902 
3903     __ BIND(L_dec_first_block192);
3904     __ aes_dround23(F46, F60, F62, F58);
3905     __ aes_dround01(F44, F60, F62, F56);
3906     __ aes_dround23(F42, F56, F58, F62);
3907     __ aes_dround01(F40, F56, F58, F60);
3908 
3909     __ BIND(L_dec_first_block128);
3910     for ( int i = 38;  i >= 6; i -= 8 ) {
3911       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
3912       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
3913       if ( i != 6) {
3914         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
3915         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
3916       } else {
3917         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
3918         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
3919       }
3920     }
3921 
3922     __ movxtod(L0,F56);
3923     __ movxtod(L1,F58);
3924     __ mov(L4,L0);
3925     __ mov(L5,L1);
3926     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
3927     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
3928 
3929     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3930     __ andcc(to, 7, G1);
3931     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_first_block);
3932     __ delayed()->edge8n(to, G0, G2);
3933 
3934     // aligned case: store output into the destination array
3935     __ stf(FloatRegisterImpl::D, F60, to, 0);
3936     __ stf(FloatRegisterImpl::D, F62, to, 8);
3937     __ ba_short(L_check_decrypt_end);
3938 
3939     __ BIND(L_store_misaligned_output_first_block);
3940     __ add(to, 8, G3);
3941     __ mov(8, G4);
3942     __ sub(G4, G1, G4);
3943     __ alignaddr(G4, G0, G4);
3944     __ faligndata(F60, F60, F60);
3945     __ faligndata(F62, F62, F62);
3946     __ mov(to, G1);
3947     __ and3(to, -8, to);
3948     __ and3(G3, -8, G3);
3949     __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY);
3950     __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY);
3951     __ add(to, 8, to);
3952     __ add(G3, 8, G3);
3953     __ orn(G0, G2, G2);
3954     __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY);
3955     __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY);
3956     __ mov(G1, to);
3957 
3958     __ BIND(L_check_decrypt_end);
3959     __ add(from, 16, from);
3960     __ add(to, 16, to);
3961     __ subcc(len_reg, 16, len_reg);
3962     __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end);
3963     __ delayed()->nop();
3964 
3965     // 256-bit original key size
3966     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256);
3967 
3968     // 192-bit original key size
3969     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192);
3970 
3971     __ align(OptoLoopAlignment);
3972     __ BIND(L_dec_next2_blocks128);
3973     __ nop();
3974 
3975     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3976     __ andcc(from, 7, G0);
3977     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks128);
3978     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
3979 
3980     // aligned case: load input into G4, G5, L4 and L5
3981     __ ldx(from,0,G4);
3982     __ ldx(from,8,G5);
3983     __ ldx(from,16,L4);
3984     __ ldx(from,24,L5);
3985     __ ba_short(L_transform_next2_blocks128);
3986 
3987     __ BIND(L_load_misaligned_next2_blocks128);
3988     __ alignaddr(from, G0, from);
3989     // F40, F42, F58, F60, F62 can be clobbered
3990     __ ldf(FloatRegisterImpl::D, from, 0, F40);
3991     __ ldf(FloatRegisterImpl::D, from, 8, F42);
3992     __ ldf(FloatRegisterImpl::D, from, 16, F60);
3993     __ ldf(FloatRegisterImpl::D, from, 24, F62);
3994     __ ldf(FloatRegisterImpl::D, from, 32, F58);
3995     __ faligndata(F40, F42, F40);
3996     __ faligndata(F42, F60, F42);
3997     __ faligndata(F60, F62, F60);
3998     __ faligndata(F62, F58, F62);
3999     __ movdtox(F40, G4);
4000     __ movdtox(F42, G5);
4001     __ movdtox(F60, L4);
4002     __ movdtox(F62, L5);
4003     __ mov(G1, from);
4004 
4005     __ BIND(L_transform_next2_blocks128);
4006     // F40:F42 used for first 16-bytes
4007     __ xor3(L2,G4,G1);
4008     __ movxtod(G1,F40);
4009     __ xor3(L3,G5,G1);
4010     __ movxtod(G1,F42);
4011 
4012     // F60:F62 used for next 16-bytes
4013     __ xor3(L2,L4,G1);
4014     __ movxtod(G1,F60);
4015     __ xor3(L3,L5,G1);
4016     __ movxtod(G1,F62);
4017 
4018     for ( int i = 38;  i >= 6; i -= 8 ) {
4019       __ aes_dround23(as_FloatRegister(i), F40, F42, F44);
4020       __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46);
4021       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
4022       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
4023       if (i != 6 ) {
4024         __ aes_dround23(as_FloatRegister(i-4), F46, F44, F42);
4025         __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40);
4026         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
4027         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
4028       } else {
4029         __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42);
4030         __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40);
4031         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
4032         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
4033       }
4034     }
4035 
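         // CBC chaining for this pair of blocks: P[i] = AES_decrypt(C[i]) ^ C[i-1].
         // L0:L1 holds the previous ciphertext block (initially the IV); the second
         // block is XOR-ed with the first block's ciphertext (G4:G5), and the last
         // ciphertext block just consumed (L4:L5) becomes the chaining value for the
         // next pair.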
4036     __ movxtod(L0,F46);
4037     __ movxtod(L1,F44);
4038     __ fxor(FloatRegisterImpl::D, F46, F40, F40);
4039     __ fxor(FloatRegisterImpl::D, F44, F42, F42);
4040 
4041     __ movxtod(G4,F56);
4042     __ movxtod(G5,F58);
4043     __ mov(L4,L0);
4044     __ mov(L5,L1);
4045     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
4046     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
4047 
4048     // For a mis-aligned store of the 32 bytes of result we do the following:
4049     // circular right-shift all 4 FP registers so that the 'head' and 'tail'
4050     // parts that start at the mis-aligned address end up in a single FP reg;
4051     // the other 3 FP regs can then be stored with regular 8-byte stores,
4052     // and the edge + partial-store mechanism stores the 'head' and 'tail' parts.
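         //
         // A rough C-style sketch of the store pattern (illustrative only; the helper
         // names are hypothetical, the emitted code uses alignaddr/faligndata/edge8n/
         // stpartialf):
         //
         //   int r = to & 7;                                // destination misalignment
         //   rotate the 32 result bytes right by r bytes;   // wrap-around reg = tail (r bytes) + head (8 - r bytes)
         //   partial_store(to & ~7,        wrap_reg,  head_mask);  // head bytes up to the first 8-byte boundary
         //   store64((to & ~7) + 8 ... + 24, middle_regs);         // three aligned 8-byte stores
         //   partial_store((to & ~7) + 32, wrap_reg, ~head_mask);  // trailing tail bytes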
4053 
4054     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
4055     __ andcc(to, 7, G1);
4056     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks128);
4057     __ delayed()->edge8n(to, G0, G2);
4058 
4059     // aligned case: store output into the destination array
4060     __ stf(FloatRegisterImpl::D, F40, to, 0);
4061     __ stf(FloatRegisterImpl::D, F42, to, 8);
4062     __ stf(FloatRegisterImpl::D, F60, to, 16);
4063     __ stf(FloatRegisterImpl::D, F62, to, 24);
4064     __ ba_short(L_check_decrypt_loop_end128);
4065 
4066     __ BIND(L_store_misaligned_output_next2_blocks128);
4067     __ mov(8, G4);
4068     __ sub(G4, G1, G4);
4069     __ alignaddr(G4, G0, G4);
4070     __ faligndata(F40, F42, F56); // F56 can be clobbered
4071     __ faligndata(F42, F60, F42);
4072     __ faligndata(F60, F62, F60);
4073     __ faligndata(F62, F40, F40);
4074     __ mov(to, G1);
4075     __ and3(to, -8, to);
4076     __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY);
4077     __ stf(FloatRegisterImpl::D, F56, to, 8);
4078     __ stf(FloatRegisterImpl::D, F42, to, 16);
4079     __ stf(FloatRegisterImpl::D, F60, to, 24);
4080     __ add(to, 32, to);
4081     __ orn(G0, G2, G2);
4082     __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY);
4083     __ mov(G1, to);
4084 
4085     __ BIND(L_check_decrypt_loop_end128);
4086     __ add(from, 32, from);
4087     __ add(to, 32, to);
4088     __ subcc(len_reg, 32, len_reg);
4089     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128);
4090     __ delayed()->nop();
4091     __ ba_short(L_cbcdec_end);
4092 
4093     __ align(OptoLoopAlignment);
4094     __ BIND(L_dec_next2_blocks192);
4095     __ nop();
4096 
4097     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
4098     __ andcc(from, 7, G0);
4099     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks192);
4100     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
4101 
4102     // aligned case: load input into G4, G5, L4 and L5
4103     __ ldx(from,0,G4);
4104     __ ldx(from,8,G5);
4105     __ ldx(from,16,L4);
4106     __ ldx(from,24,L5);
4107     __ ba_short(L_transform_next2_blocks192);
4108 
4109     __ BIND(L_load_misaligned_next2_blocks192);
4110     __ alignaddr(from, G0, from);
4111     // F48, F50, F52, F60, F62 can be clobbered
4112     __ ldf(FloatRegisterImpl::D, from, 0, F48);
4113     __ ldf(FloatRegisterImpl::D, from, 8, F50);
4114     __ ldf(FloatRegisterImpl::D, from, 16, F60);
4115     __ ldf(FloatRegisterImpl::D, from, 24, F62);
4116     __ ldf(FloatRegisterImpl::D, from, 32, F52);
4117     __ faligndata(F48, F50, F48);
4118     __ faligndata(F50, F60, F50);
4119     __ faligndata(F60, F62, F60);
4120     __ faligndata(F62, F52, F62);
4121     __ movdtox(F48, G4);
4122     __ movdtox(F50, G5);
4123     __ movdtox(F60, L4);
4124     __ movdtox(F62, L5);
4125     __ mov(G1, from);
4126 
4127     __ BIND(L_transform_next2_blocks192);
4128     // F48:F50 used for first 16-bytes
4129     __ xor3(L2,G4,G1);
4130     __ movxtod(G1,F48);
4131     __ xor3(L3,G5,G1);
4132     __ movxtod(G1,F50);
4133 
4134     // F60:F62 used for next 16-bytes
4135     __ xor3(L2,L4,G1);
4136     __ movxtod(G1,F60);
4137     __ xor3(L3,L5,G1);
4138     __ movxtod(G1,F62);
4139 
4140     for ( int i = 46;  i >= 6; i -= 8 ) {
4141       __ aes_dround23(as_FloatRegister(i), F48, F50, F52);
4142       __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54);
4143       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
4144       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
4145       if (i != 6 ) {
4146         __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50);
4147         __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48);
4148         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
4149         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
4150       } else {
4151         __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50);
4152         __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48);
4153         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
4154         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
4155       }
4156     }
4157 
4158     __ movxtod(L0,F54);
4159     __ movxtod(L1,F52);
4160     __ fxor(FloatRegisterImpl::D, F54, F48, F48);
4161     __ fxor(FloatRegisterImpl::D, F52, F50, F50);
4162 
4163     __ movxtod(G4,F56);
4164     __ movxtod(G5,F58);
4165     __ mov(L4,L0);
4166     __ mov(L5,L1);
4167     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
4168     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
4169 
4170     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
4171     __ andcc(to, 7, G1);
4172     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks192);
4173     __ delayed()->edge8n(to, G0, G2);
4174 
4175     // aligned case: store output into the destination array
4176     __ stf(FloatRegisterImpl::D, F48, to, 0);
4177     __ stf(FloatRegisterImpl::D, F50, to, 8);
4178     __ stf(FloatRegisterImpl::D, F60, to, 16);
4179     __ stf(FloatRegisterImpl::D, F62, to, 24);
4180     __ ba_short(L_check_decrypt_loop_end192);
4181 
4182     __ BIND(L_store_misaligned_output_next2_blocks192);
4183     __ mov(8, G4);
4184     __ sub(G4, G1, G4);
4185     __ alignaddr(G4, G0, G4);
4186     __ faligndata(F48, F50, F56); // F56 can be clobbered
4187     __ faligndata(F50, F60, F50);
4188     __ faligndata(F60, F62, F60);
4189     __ faligndata(F62, F48, F48);
4190     __ mov(to, G1);
4191     __ and3(to, -8, to);
4192     __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY);
4193     __ stf(FloatRegisterImpl::D, F56, to, 8);
4194     __ stf(FloatRegisterImpl::D, F50, to, 16);
4195     __ stf(FloatRegisterImpl::D, F60, to, 24);
4196     __ add(to, 32, to);
4197     __ orn(G0, G2, G2);
4198     __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY);
4199     __ mov(G1, to);
4200 
4201     __ BIND(L_check_decrypt_loop_end192);
4202     __ add(from, 32, from);
4203     __ add(to, 32, to);
4204     __ subcc(len_reg, 32, len_reg);
4205     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192);
4206     __ delayed()->nop();
4207     __ ba_short(L_cbcdec_end);
4208 
4209     __ align(OptoLoopAlignment);
4210     __ BIND(L_dec_next2_blocks256);
4211     __ nop();
4212 
4213     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
4214     __ andcc(from, 7, G0);
4215     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks256);
4216     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
4217 
4218     // aligned case: load input into G4, G5, L4 and L5
4219     __ ldx(from,0,G4);
4220     __ ldx(from,8,G5);
4221     __ ldx(from,16,L4);
4222     __ ldx(from,24,L5);
4223     __ ba_short(L_transform_next2_blocks256);
4224 
4225     __ BIND(L_load_misaligned_next2_blocks256);
4226     __ alignaddr(from, G0, from);
4227     // F0, F2, F4, F60, F62 can be clobbered
4228     __ ldf(FloatRegisterImpl::D, from, 0, F0);
4229     __ ldf(FloatRegisterImpl::D, from, 8, F2);
4230     __ ldf(FloatRegisterImpl::D, from, 16, F60);
4231     __ ldf(FloatRegisterImpl::D, from, 24, F62);
4232     __ ldf(FloatRegisterImpl::D, from, 32, F4);
4233     __ faligndata(F0, F2, F0);
4234     __ faligndata(F2, F60, F2);
4235     __ faligndata(F60, F62, F60);
4236     __ faligndata(F62, F4, F62);
4237     __ movdtox(F0, G4);
4238     __ movdtox(F2, G5);
4239     __ movdtox(F60, L4);
4240     __ movdtox(F62, L5);
4241     __ mov(G1, from);
4242 
4243     __ BIND(L_transform_next2_blocks256);
4244     // F0:F2 used for first 16-bytes
4245     __ xor3(L2,G4,G1);
4246     __ movxtod(G1,F0);
4247     __ xor3(L3,G5,G1);
4248     __ movxtod(G1,F2);
4249 
4250     // F60:F62 used for next 16-bytes
4251     __ xor3(L2,L4,G1);
4252     __ movxtod(G1,F60);
4253     __ xor3(L3,L5,G1);
4254     __ movxtod(G1,F62);
4255 
4256     __ aes_dround23(F54, F0, F2, F4);
4257     __ aes_dround01(F52, F0, F2, F6);
4258     __ aes_dround23(F54, F60, F62, F58);
4259     __ aes_dround01(F52, F60, F62, F56);
4260     __ aes_dround23(F50, F6, F4, F2);
4261     __ aes_dround01(F48, F6, F4, F0);
4262     __ aes_dround23(F50, F56, F58, F62);
4263     __ aes_dround01(F48, F56, F58, F60);
4264     // save F48:F54 in temp registers
4265     __ movdtox(F54,G2);
4266     __ movdtox(F52,G3);
4267     __ movdtox(F50,L6);
4268     __ movdtox(F48,G1);
4269     for ( int i = 46;  i >= 14; i -= 8 ) {
4270       __ aes_dround23(as_FloatRegister(i), F0, F2, F4);
4271       __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6);
4272       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
4273       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
4274       __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2);
4275       __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0);
4276       __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
4277       __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
4278     }
4279     // load the first 32 bytes of the original key into F48:F54 (used by the last two rounds)
4280     __ ldf(FloatRegisterImpl::D, original_key, 0, F48);
4281     __ ldf(FloatRegisterImpl::D, original_key, 8, F50);
4282     __ ldf(FloatRegisterImpl::D, original_key, 16, F52);
4283     __ ldf(FloatRegisterImpl::D, original_key, 24, F54);
4284     __ aes_dround23(F54, F0, F2, F4);
4285     __ aes_dround01(F52, F0, F2, F6);
4286     __ aes_dround23(F54, F60, F62, F58);
4287     __ aes_dround01(F52, F60, F62, F56);
4288     __ aes_dround23_l(F50, F6, F4, F2);
4289     __ aes_dround01_l(F48, F6, F4, F0);
4290     __ aes_dround23_l(F50, F56, F58, F62);
4291     __ aes_dround01_l(F48, F56, F58, F60);
4292     // re-init F48:F54 with their original values
4293     __ movxtod(G2,F54);
4294     __ movxtod(G3,F52);
4295     __ movxtod(L6,F50);
4296     __ movxtod(G1,F48);
4297 
4298     __ movxtod(L0,F6);
4299     __ movxtod(L1,F4);
4300     __ fxor(FloatRegisterImpl::D, F6, F0, F0);
4301     __ fxor(FloatRegisterImpl::D, F4, F2, F2);
4302 
4303     __ movxtod(G4,F56);
4304     __ movxtod(G5,F58);
4305     __ mov(L4,L0);
4306     __ mov(L5,L1);
4307     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
4308     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
4309 
4310     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
4311     __ andcc(to, 7, G1);
4312     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks256);
4313     __ delayed()->edge8n(to, G0, G2);
4314 
4315     // aligned case: store output into the destination array
4316     __ stf(FloatRegisterImpl::D, F0, to, 0);
4317     __ stf(FloatRegisterImpl::D, F2, to, 8);
4318     __ stf(FloatRegisterImpl::D, F60, to, 16);
4319     __ stf(FloatRegisterImpl::D, F62, to, 24);
4320     __ ba_short(L_check_decrypt_loop_end256);
4321 
4322     __ BIND(L_store_misaligned_output_next2_blocks256);
4323     __ mov(8, G4);
4324     __ sub(G4, G1, G4);
4325     __ alignaddr(G4, G0, G4);
4326     __ faligndata(F0, F2, F56); // F56 can be clobbered
4327     __ faligndata(F2, F60, F2);
4328     __ faligndata(F60, F62, F60);
4329     __ faligndata(F62, F0, F0);
4330     __ mov(to, G1);
4331     __ and3(to, -8, to);
4332     __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY);
4333     __ stf(FloatRegisterImpl::D, F56, to, 8);
4334     __ stf(FloatRegisterImpl::D, F2, to, 16);
4335     __ stf(FloatRegisterImpl::D, F60, to, 24);
4336     __ add(to, 32, to);
4337     __ orn(G0, G2, G2);
4338     __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY);
4339     __ mov(G1, to);
4340 
4341     __ BIND(L_check_decrypt_loop_end256);
4342     __ add(from, 32, from);
4343     __ add(to, 32, to);
4344     __ subcc(len_reg, 32, len_reg);
4345     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256);
4346     __ delayed()->nop();
4347 
4348     __ BIND(L_cbcdec_end);
4349     // re-init initial vector for next block, 8-byte alignment is guaranteed
4350     __ stx(L0, rvec, 0);
4351     __ stx(L1, rvec, 8);
4352     __ mov(L7, I0);
4353     __ ret();
4354     __ delayed()->restore();
4355 
4356     return start;
4357   }
4358 
4359   address generate_sha1_implCompress(bool multi_block, const char *name) {
4360     __ align(CodeEntryAlignment);
4361     StubCodeMark mark(this, "StubRoutines", name);
4362     address start = __ pc();
4363 
4364     Label L_sha1_loop, L_sha1_unaligned_input, L_sha1_unaligned_input_loop;
4365     int i;
4366 
4367     Register buf   = O0; // byte[] source+offset
4368     Register state = O1; // int[]  SHA.state
4369     Register ofs   = O2; // int    offset
4370     Register limit = O3; // int    limit
4371 
4372     // load state into F0-F4
4373     for (i = 0; i < 5; i++) {
4374       __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i));
4375     }
4376 
4377     __ andcc(buf, 7, G0);
4378     __ br(Assembler::notZero, false, Assembler::pn, L_sha1_unaligned_input);
4379     __ delayed()->nop();
4380 
4381     __ BIND(L_sha1_loop);
4382     // load buf into F8-F22
4383     for (i = 0; i < 8; i++) {
4384       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
4385     }
4386     __ sha1();
4387     if (multi_block) {
4388       __ add(ofs, 64, ofs);
4389       __ add(buf, 64, buf);
4390       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_loop);
4391       __ mov(ofs, O0); // to be returned
4392     }
4393 
4394     // store F0-F4 into state and return
4395     for (i = 0; i < 4; i++) {
4396       __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
4397     }
4398     __ retl();
4399     __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10);
4400 
4401     __ BIND(L_sha1_unaligned_input);
4402     __ alignaddr(buf, G0, buf);
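         // alignaddr rounds 'buf' down to an 8-byte boundary and records the
         // misalignment in %gsr; the loop below then loads nine aligned doubles
         // (72 bytes) and uses faligndata on adjacent pairs to extract the 64
         // misaligned input bytes. The SHA-256 and SHA-512 stubs below use the
         // same technique.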
4403 
4404     __ BIND(L_sha1_unaligned_input_loop);
4405     // load buf into F8-F22
4406     for (i = 0; i < 9; i++) {
4407       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
4408     }
4409     for (i = 0; i < 8; i++) {
4410       __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8));
4411     }
4412     __ sha1();
4413     if (multi_block) {
4414       __ add(ofs, 64, ofs);
4415       __ add(buf, 64, buf);
4416       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_unaligned_input_loop);
4417       __ mov(ofs, O0); // to be returned
4418     }
4419 
4420     // store F0-F4 into state and return
4421     for (i = 0; i < 4; i++) {
4422       __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
4423     }
4424     __ retl();
4425     __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10);
4426 
4427     return start;
4428   }
4429 
4430   address generate_sha256_implCompress(bool multi_block, const char *name) {
4431     __ align(CodeEntryAlignment);
4432     StubCodeMark mark(this, "StubRoutines", name);
4433     address start = __ pc();
4434 
4435     Label L_sha256_loop, L_sha256_unaligned_input, L_sha256_unaligned_input_loop;
4436     int i;
4437 
4438     Register buf   = O0; // byte[] source+offset
4439     Register state = O1; // int[]  SHA2.state
4440     Register ofs   = O2; // int    offset
4441     Register limit = O3; // int    limit
4442 
4443     // load state into F0-F7
4444     for (i = 0; i < 8; i++) {
4445       __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i));
4446     }
4447 
4448     __ andcc(buf, 7, G0);
4449     __ br(Assembler::notZero, false, Assembler::pn, L_sha256_unaligned_input);
4450     __ delayed()->nop();
4451 
4452     __ BIND(L_sha256_loop);
4453     // load buf into F8-F22
4454     for (i = 0; i < 8; i++) {
4455       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
4456     }
4457     __ sha256();
4458     if (multi_block) {
4459       __ add(ofs, 64, ofs);
4460       __ add(buf, 64, buf);
4461       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_loop);
4462       __ mov(ofs, O0); // to be returned
4463     }
4464 
4465     // store F0-F7 into state and return
4466     for (i = 0; i < 7; i++) {
4467       __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
4468     }
4469     __ retl();
4470     __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c);
4471 
4472     __ BIND(L_sha256_unaligned_input);
4473     __ alignaddr(buf, G0, buf);
4474 
4475     __ BIND(L_sha256_unaligned_input_loop);
4476     // load buf into F8-F22
4477     for (i = 0; i < 9; i++) {
4478       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
4479     }
4480     for (i = 0; i < 8; i++) {
4481       __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8));
4482     }
4483     __ sha256();
4484     if (multi_block) {
4485       __ add(ofs, 64, ofs);
4486       __ add(buf, 64, buf);
4487       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_unaligned_input_loop);
4488       __ mov(ofs, O0); // to be returned
4489     }
4490 
4491     // store F0-F7 into state and return
4492     for (i = 0; i < 7; i++) {
4493       __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
4494     }
4495     __ retl();
4496     __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c);
4497 
4498     return start;
4499   }
4500 
4501   address generate_sha512_implCompress(bool multi_block, const char *name) {
4502     __ align(CodeEntryAlignment);
4503     StubCodeMark mark(this, "StubRoutines", name);
4504     address start = __ pc();
4505 
4506     Label L_sha512_loop, L_sha512_unaligned_input, L_sha512_unaligned_input_loop;
4507     int i;
4508 
4509     Register buf   = O0; // byte[] source+offset
4510     Register state = O1; // long[] SHA5.state
4511     Register ofs   = O2; // int    offset
4512     Register limit = O3; // int    limit
4513 
4514     // load state into F0-F14
4515     for (i = 0; i < 8; i++) {
4516       __ ldf(FloatRegisterImpl::D, state, i*8, as_FloatRegister(i*2));
4517     }
4518 
4519     __ andcc(buf, 7, G0);
4520     __ br(Assembler::notZero, false, Assembler::pn, L_sha512_unaligned_input);
4521     __ delayed()->nop();
4522 
4523     __ BIND(L_sha512_loop);
4524     // load buf into F16-F46
4525     for (i = 0; i < 16; i++) {
4526       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16));
4527     }
4528     __ sha512();
4529     if (multi_block) {
4530       __ add(ofs, 128, ofs);
4531       __ add(buf, 128, buf);
4532       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_loop);
4533       __ mov(ofs, O0); // to be returned
4534     }
4535 
4536     // store F0-F14 into state and return
4537     for (i = 0; i < 7; i++) {
4538       __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8);
4539     }
4540     __ retl();
4541     __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38);
4542 
4543     __ BIND(L_sha512_unaligned_input);
4544     __ alignaddr(buf, G0, buf);
4545 
4546     __ BIND(L_sha512_unaligned_input_loop);
4547     // load buf into F16-F46
4548     for (i = 0; i < 17; i++) {
4549       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16));
4550     }
4551     for (i = 0; i < 16; i++) {
4552       __ faligndata(as_FloatRegister(i*2 + 16), as_FloatRegister(i*2 + 18), as_FloatRegister(i*2 + 16));
4553     }
4554     __ sha512();
4555     if (multi_block) {
4556       __ add(ofs, 128, ofs);
4557       __ add(buf, 128, buf);
4558       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_unaligned_input_loop);
4559       __ mov(ofs, O0); // to be returned
4560     }
4561 
4562     // store F0-F14 into state and return
4563     for (i = 0; i < 7; i++) {
4564       __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8);
4565     }
4566     __ retl();
4567     __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38);
4568 
4569     return start;
4570   }
4571 
4572   /* Single and multi-block ghash operations */
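       //
       // In outline, GHASH folds each 16-byte block into the 128-bit state as
       //   state = (state ^ block) * H   in GF(2^128),
       // where H is the hash subkey and the field is defined by the polynomial
       // x^128 + x^7 + x^2 + x + 1. The carry-less 64x64 products are formed with
       // xmulx/xmulxhi, and the reduction uses the constant 0xE1 << 56, the standard
       // GCM encoding of the low-order terms of that polynomial.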
4573   address generate_ghash_processBlocks() {
4574       __ align(CodeEntryAlignment);
4575       Label L_ghash_loop, L_aligned, L_main;
4576       StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
4577       address start = __ pc();
4578 
4579       Register state = I0;
4580       Register subkeyH = I1;
4581       Register data = I2;
4582       Register len = I3;
4583 
4584       __ save_frame(0);
4585 
4586       __ ldx(state, 0, O0);
4587       __ ldx(state, 8, O1);
4588 
4589       // Loop label for multiblock operations
4590       __ BIND(L_ghash_loop);
4591 
4592       // Check if 'data' is unaligned
4593       __ andcc(data, 7, G1);
4594       __ br(Assembler::zero, false, Assembler::pt, L_aligned);
4595       __ delayed()->nop();
4596 
4597       Register left_shift = L1;
4598       Register right_shift = L2;
4599       Register data_ptr = L3;
4600 
4601       // Get left and right shift values in bits
4602       __ sll(G1, LogBitsPerByte, left_shift);
4603       __ mov(64, right_shift);
4604       __ sub(right_shift, left_shift, right_shift);
4605 
4606       // Align to read 'data'
4607       __ sub(data, G1, data_ptr);
4608 
4609       // Load first 8 bytes of 'data'
4610       __ ldx(data_ptr, 0, O4);
4611       __ sllx(O4, left_shift, O4);
4612       __ ldx(data_ptr, 8, O5);
4613       __ srlx(O5, right_shift, G4);
4614       __ bset(G4, O4);
4615 
4616       // Load second 8 bytes of 'data'
4617       __ sllx(O5, left_shift, O5);
4618       __ ldx(data_ptr, 16, G4);
4619       __ srlx(G4, right_shift, G4);
4620       __ ba(L_main);
4621       __ delayed()->bset(G4, O5);
4622 
4623       // If 'data' is aligned, load normally
4624       __ BIND(L_aligned);
4625       __ ldx(data, 0, O4);
4626       __ ldx(data, 8, O5);
4627 
4628       __ BIND(L_main);
4629       __ ldx(subkeyH, 0, O2);
4630       __ ldx(subkeyH, 8, O3);
4631 
4632       __ xor3(O0, O4, O0);
4633       __ xor3(O1, O5, O1);
4634 
4635       __ xmulxhi(O0, O3, G3);
4636       __ xmulx(O0, O2, O5);
4637       __ xmulxhi(O1, O2, G4);
4638       __ xmulxhi(O1, O3, G5);
4639       __ xmulx(O0, O3, G1);
4640       __ xmulx(O1, O3, G2);
4641       __ xmulx(O1, O2, O3);
4642       __ xmulxhi(O0, O2, O4);
4643 
4644       __ mov(0xE1, O0);
4645       __ sllx(O0, 56, O0);
4646 
4647       __ xor3(O5, G3, O5);
4648       __ xor3(O5, G4, O5);
4649       __ xor3(G5, G1, G1);
4650       __ xor3(G1, O3, G1);
4651       __ srlx(G2, 63, O1);
4652       __ srlx(G1, 63, G3);
4653       __ sllx(G2, 63, O3);
4654       __ sllx(G2, 58, O2);
4655       __ xor3(O3, O2, O2);
4656 
4657       __ sllx(G1, 1, G1);
4658       __ or3(G1, O1, G1);
4659 
4660       __ xor3(G1, O2, G1);
4661 
4662       __ sllx(G2, 1, G2);
4663 
4664       __ xmulxhi(G1, O0, O1);
4665       __ xmulx(G1, O0, O2);
4666       __ xmulxhi(G2, O0, O3);
4667       __ xmulx(G2, O0, G1);
4668 
4669       __ xor3(O4, O1, O4);
4670       __ xor3(O5, O2, O5);
4671       __ xor3(O5, O3, O5);
4672 
4673       __ sllx(O4, 1, O2);
4674       __ srlx(O5, 63, O3);
4675 
4676       __ or3(O2, O3, O0);
4677 
4678       __ sllx(O5, 1, O1);
4679       __ srlx(G1, 63, O2);
4680       __ or3(O1, O2, O1);
4681       __ xor3(O1, G3, O1);
4682 
4683       __ deccc(len);
4684       __ br(Assembler::notZero, true, Assembler::pt, L_ghash_loop);
4685       __ delayed()->add(data, 16, data);
4686 
4687       __ stx(O0, I0, 0);
4688       __ stx(O1, I0, 8);
4689 
4690       __ ret();
4691       __ delayed()->restore();
4692 
4693       return start;
4694   }
4695 
4696   /**
4697    *  Arguments:
4698    *
4699    * Inputs:
4700    *   O0   - int   crc
4701    *   O1   - byte* buf
4702    *   O2   - int   len
4703    *   O3   - int*  table
4704    *
4705    * Output:
4706    *   O0   - int crc result
4707    */
4708   address generate_updateBytesCRC32C() {
4709     assert(UseCRC32CIntrinsics, "need CRC32C instruction");
4710 
4711     __ align(CodeEntryAlignment);
4712     StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
4713     address start = __ pc();
4714 
4715     const Register crc   = O0;  // crc
4716     const Register buf   = O1;  // source java byte array address
4717     const Register len   = O2;  // number of bytes
4718     const Register table = O3;  // byteTable
4719 
4720     __ kernel_crc32c(crc, buf, len, table);
4721 
4722     __ retl();
4723     __ delayed()->nop();
4724 
4725     return start;
4726   }
4727 
4728 #define ADLER32_NUM_TEMPS 16
4729 
4730   /**
4731    *  Arguments:
4732    *
4733    * Inputs:
4734    *   O0   - int   adler
4735    *   O1   - byte* buff
4736    *   O2   - int   len
4737    *
4738    * Output:
4739    *   O0   - int adler result
4740    */
4741   address generate_updateBytesAdler32() {
4742     __ align(CodeEntryAlignment);
4743     StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32");
4744     address start = __ pc();
4745 
4746     Label L_cleanup_loop, L_cleanup_loop_check;
4747     Label L_main_loop_check, L_main_loop, L_inner_loop, L_inner_loop_check;
4748     Label L_nmax_check_done;
4749 
4750     // Aliases
4751     Register s1     = O0;
4752     Register s2     = O3;
4753     Register buff   = O1;
4754     Register len    = O2;
4755     Register temp[ADLER32_NUM_TEMPS] = {L0, L1, L2, L3, L4, L5, L6, L7, I0, I1, I2, I3, I4, I5, G3, I7};
4756 
4757     // Max number of bytes we can process before having to take the mod
4758     // 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
4759     unsigned long NMAX = 0x15B0;
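         // For reference, Adler-32 maintains two running sums over the input bytes:
         //   s1 += b;  s2 += s1;   for every byte b, both reduced mod 65521
         // (0xFFF1, the largest prime below 2^16); the result is (s2 << 16) | s1.
         // Deferring the reduction for up to NMAX bytes keeps both sums within 32 bits.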
4760 
4761     // Zero-out the upper bits of len
4762     __ clruwu(len);
4763 
4764     // Create the mask 0xFFFF
4765     __ set64(0x00FFFF, O4, O5); // O5 is the temp register
4766 
4767     // s1 is initialized to the lower 16 bits of adler
4768     // s2 is initialized to the upper 16 bits of adler
4769     __ srlx(O0, 16, O5); // adler >> 16
4770     __ and3(O0, O4, s1); // s1  = (adler & 0xFFFF)
4771     __ and3(O5, O4, s2); // s2  = ((adler >> 16) & 0xFFFF)
4772 
4773     // The pipelined loop needs at least 16 elements per iteration.
4774     // It checks this itself, but it is more efficient to branch straight to the cleanup loop.
4775     // Set up the constant for the cutoff check.
4776     __ mov(15, O4);
4777 
4778     // Check if we are above the cutoff, if not go to the cleanup loop immediately
4779     __ cmp_and_br_short(len, O4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_loop_check);
4780 
4781     // Free up some registers for our use
4782     for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
4783       __ movxtod(temp[i], as_FloatRegister(2*i));
4784     }
4785 
4786     // Loop maintenance stuff is done at the end of the loop, so skip to there
4787     __ ba_short(L_main_loop_check);
4788 
4789     __ BIND(L_main_loop);
4790 
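         // The main loop is software-pipelined: loads for the next group of bytes are
         // issued while the sums for the previously loaded group are accumulated,
         // using temp[0..15] as a rotating buffer. The prologue below pre-loads the
         // first 8 bytes; the epilogue after the inner loop drains the remainder.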
4791     // Prologue for inner loop
4792     __ ldub(buff, 0, L0);
4793     __ dec(O5);
4794 
4795     for (int i = 1; i < 8; i++) {
4796       __ ldub(buff, i, temp[i]);
4797     }
4798 
4799     __ inc(buff, 8);
4800 
4801     // The inner loop processes 16 elements at a time; it may never execute if the
4802     // outer loop has only 16 elements left to process
4803     __ ba_short(L_inner_loop_check);
4804 
4805     __ BIND(L_inner_loop);
4806 
4807     for (int i = 0; i < 8; i++) {
4808       __ ldub(buff, (2*i), temp[(8+(2*i)) % ADLER32_NUM_TEMPS]);
4809       __ add(s1, temp[i], s1);
4810       __ ldub(buff, (2*i)+1, temp[(8+(2*i)+1) % ADLER32_NUM_TEMPS]);
4811       __ add(s2, s1, s2);
4812     }
4813 
4814     // Original temp 0-7 used and new loads to temp 0-7 issued
4815     // temp 8-15 ready to be consumed
4816     __ add(s1, I0, s1);
4817     __ dec(O5);
4818     __ add(s2, s1, s2);
4819     __ add(s1, I1, s1);
4820     __ inc(buff, 16);
4821     __ add(s2, s1, s2);
4822 
4823     for (int i = 0; i < 6; i++) {
4824       __ add(s1, temp[10+i], s1);
4825       __ add(s2, s1, s2);
4826     }
4827 
4828     __ BIND(L_inner_loop_check);
4829     __ nop();
4830     __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_inner_loop);
4831 
4832     // Epilogue
4833     for (int i = 0; i < 4; i++) {
4834       __ ldub(buff, (2*i), temp[8+(2*i)]);
4835       __ add(s1, temp[i], s1);
4836       __ ldub(buff, (2*i)+1, temp[8+(2*i)+1]);
4837       __ add(s2, s1, s2);
4838     }
4839 
4840     __ add(s1, temp[4], s1);
4841     __ inc(buff, 8);
4842 
4843     for (int i = 0; i < 11; i++) {
4844       __ add(s2, s1, s2);
4845       __ add(s1, temp[5+i], s1);
4846     }
4847 
4848     __ add(s2, s1, s2);
4849 
4850     // Take the mod for s1 and s2 (s % 65521 is computed as s - (s / 65521) * 65521)
4851     __ set64(0xFFF1, L0, L1);
4852     __ udivx(s1, L0, L1);
4853     __ udivx(s2, L0, L2);
4854     __ mulx(L0, L1, L1);
4855     __ mulx(L0, L2, L2);
4856     __ sub(s1, L1, s1);
4857     __ sub(s2, L2, s2);
4858 
4859     // Make sure there is something left to process
4860     __ BIND(L_main_loop_check);
4861     __ set64(NMAX, L0, L1);
4862     // k = len < NMAX ? len : NMAX
4863     __ cmp_and_br_short(len, L0, Assembler::greaterEqualUnsigned, Assembler::pt, L_nmax_check_done);
4864     __ andn(len, 0x0F, L0); // only loop a multiple of 16 times
4865     __ BIND(L_nmax_check_done);
4866     __ mov(L0, O5);
4867     __ sub(len, L0, len); // len -= k
4868 
4869     __ srlx(O5, 4, O5); // number of 16-byte groups
4870     __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_main_loop);
4871 
4872     // Restore anything we used, take the mod one last time, combine and return
4873     // Restore any registers we saved
4874     for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
4875       __ movdtox(as_FloatRegister(2*i), temp[i]);
4876     }
4877 
4878     // There might be nothing left to process
4879     __ ba_short(L_cleanup_loop_check);
4880 
4881     __ BIND(L_cleanup_loop);
4882     __ ldub(buff, 0, O4); // load single byte from buffer
4883     __ inc(buff); // buff++
4884     __ add(s1, O4, s1); // s1 += *buff++;
4885     __ dec(len); // len--
4886     __ add(s1, s2, s2); // s2 += s1;
4887     __ BIND(L_cleanup_loop_check);
4888     __ nop();
4889     __ cmp_and_br_short(len, 0, Assembler::notEqual, Assembler::pt, L_cleanup_loop);
4890 
4891     // Take the mod one last time
4892     __ set64(0xFFF1, O1, O2);
4893     __ udivx(s1, O1, O2);
4894     __ udivx(s2, O1, O5);
4895     __ mulx(O1, O2, O2);
4896     __ mulx(O1, O5, O5);
4897     __ sub(s1, O2, s1);
4898     __ sub(s2, O5, s2);
4899 
4900     // Combine lower bits and higher bits
4901     __ sllx(s2, 16, s2); // s2 = s2 << 16
4902     __ or3(s1, s2, s1);  // adler = s2 | s1
4903     // Final return value is in O0
4904     __ retl();
4905     __ delayed()->nop();
4906 
4907     return start;
4908   }
4909 
4910   /**
4911    *  Arguments:
4912    *
4913    * Inputs:
4914    *   O0   - int   crc
4915    *   O1   - byte* buf
4916    *   O2   - int   len
4917    *   O3   - int*  table
4918    *
4919    * Output:
4920    *   O0   - int crc result
4921    */
4922   address generate_updateBytesCRC32() {
4923     assert(UseCRC32Intrinsics, "need VIS3 instructions");
4924 
4925     __ align(CodeEntryAlignment);
4926     StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
4927     address start = __ pc();
4928 
4929     const Register crc   = O0; // crc
4930     const Register buf   = O1; // source java byte array address
4931     const Register len   = O2; // length
4932     const Register table = O3; // crc_table address (reuse register)
4933 
4934     __ kernel_crc32(crc, buf, len, table);
4935 
4936     __ retl();
4937     __ delayed()->nop();
4938 
4939     return start;
4940   }
4941 
4942   /**
4943    * Arguments:
4944    *
4945    * Inputs:
4946    *   I0   - int* x-addr
4947    *   I1   - int  x-len
4948    *   I2   - int* y-addr
4949    *   I3   - int  y-len
4950    *   I4   - int* z-addr   (output vector)
4951    *   I5   - int  z-len
4952    */
4953   address generate_multiplyToLen() {
4954     assert(UseMultiplyToLenIntrinsic, "need VIS3 instructions");
4955 
4956     __ align(CodeEntryAlignment);
4957     StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
4958     address start = __ pc();
4959 
4960     __ save_frame(0);
4961 
4962     const Register xptr = I0; // input address
4963     const Register xlen = I1; // ...and length in 32b-words
4964     const Register yptr = I2; //
4965     const Register ylen = I3; //
4966     const Register zptr = I4; // output address
4967     const Register zlen = I5; // ...and length in 32b-words
4968 
4969     /* The minimal "limb" representation suggests that odd length vectors are as
4970      * likely as even length dittos. This in turn suggests that we need to cope
4971      * with odd/even length arrays and data not aligned properly for 64-bit read
4972      * and write operations. We thus use a number of different kernels:
4973      *
4974      *   if (is_even(x.len) && is_even(y.len))
4975      *      if (is_align64(x) && is_align64(y) && is_align64(z))
4976      *         if (x.len == y.len && 16 <= x.len && x.len <= 64)
4977      *            memv_mult_mpmul(...)
4978      *         else
4979      *            memv_mult_64x64(...)
4980      *      else
4981      *         memv_mult_64x64u(...)
4982      *   else
4983      *      memv_mult_32x32(...)
4984      *
4985      * Here we assume VIS3 support (for 'umulxhi', 'addxc' and 'addxccc').
4986      * In case CBCOND instructions are supported, we will use 'cxbX'. If the
4987      * MPMUL instruction is supported, we will generate a kernel using 'mpmul'
4988      * (for vectors with proper characteristics).
4989      */
4990     const Register tmp0 = L0;
4991     const Register tmp1 = L1;
4992 
4993     Label L_mult_32x32;
4994     Label L_mult_64x64u;
4995     Label L_mult_64x64;
4996     Label L_exit;
4997 
4998     if_both_even(xlen, ylen, tmp0, false, L_mult_32x32);
4999     if_all3_aligned(xptr, yptr, zptr, tmp1, 64, false, L_mult_64x64u);
5000 
5001     if (UseMPMUL) {
5002       if_eq(xlen, ylen, false, L_mult_64x64);
5003       if_in_rng(xlen, 16, 64, tmp0, tmp1, false, L_mult_64x64);
5004 
5005       // 1. Multiply naturally aligned 64b-datums using a generic 'mpmul' kernel,
5006       //    operating on equal length vectors of size [16..64].
5007       gen_mult_mpmul(xlen, xptr, yptr, zptr, L_exit);
5008     }
5009 
5010     // 2. Multiply naturally aligned 64-bit datums (64x64).
5011     __ bind(L_mult_64x64);
5012     gen_mult_64x64(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);
5013 
5014     // 3. Multiply unaligned 64-bit datums (64x64).
5015     __ bind(L_mult_64x64u);
5016     gen_mult_64x64_unaligned(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);
5017 
5018     // 4. Multiply naturally aligned 32-bit datums (32x32).
5019     __ bind(L_mult_32x32);
5020     gen_mult_32x32(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);
5021 
5022     __ bind(L_exit);
5023     __ ret();
5024     __ delayed()->restore();
5025 
5026     return start;
5027   }
5028 
5029   // Additional helper functions used by multiplyToLen generation.
5030 
5031   void if_both_even(Register r1, Register r2, Register tmp, bool iseven, Label &L)
5032   {
5033     __ or3(r1, r2, tmp);
5034     __ andcc(tmp, 0x1, tmp);
5035     __ br_icc_zero(iseven, Assembler::pn, L);
5036   }
5037 
5038   void if_all3_aligned(Register r1, Register r2, Register r3,
5039                        Register tmp, uint align, bool isalign, Label &L)
5040   {
5041     __ or3(r1, r2, tmp);
5042     __ or3(r3, tmp, tmp);
5043     __ andcc(tmp, (align - 1), tmp);
5044     __ br_icc_zero(isalign, Assembler::pn, L);
5045   }
5046 
5047   void if_eq(Register x, Register y, bool iseq, Label &L)
5048   {
5049     Assembler::Condition cf = (iseq ? Assembler::equal : Assembler::notEqual);
5050     __ cmp_and_br_short(x, y, cf, Assembler::pt, L);
5051   }
5052 
5053   void if_in_rng(Register x, int lb, int ub, Register t1, Register t2, bool inrng, Label &L)
5054   {
5055     assert(Assembler::is_simm13(lb), "Small ints only!");
5056     assert(Assembler::is_simm13(ub), "Small ints only!");
5057     // Compute (x - lb) * (ub - x) >= 0
5058     // NOTE: With the local use of this routine, we rely on small integers to
5059     //       guarantee that we do not overflow in the multiplication.
5060     __ add(G0, ub, t2);
5061     __ sub(x, lb, t1);
5062     __ sub(t2, x, t2);
5063     __ mulx(t1, t2, t1);
5064     Assembler::Condition cf = (inrng ? Assembler::greaterEqual : Assembler::less);
5065     __ cmp_and_br_short(t1, G0, cf, Assembler::pt, L);
5066   }
5067 
5068   void ldd_entry(Register base, Register offs, FloatRegister dest)
5069   {
5070     __ ldd(base, offs, dest);
5071     __ inc(offs, 8);
5072   }
5073 
5074   void ldx_entry(Register base, Register offs, Register dest)
5075   {
5076     __ ldx(base, offs, dest);
5077     __ inc(offs, 8);
5078   }
5079 
5080   void mpmul_entry(int m, Label &next)
5081   {
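         // Emit one 'mpmul' of size m followed by an unconditional branch (cbcond
         // with G0 == G0 is always taken) to the matching restore sequence.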
5082     __ mpmul(m);
5083     __ cbcond(Assembler::equal, Assembler::icc, G0, G0, next);
5084   }
5085 
5086   void stx_entry(Label &L, Register r1, Register r2, Register base, Register offs)
5087   {
5088     __ bind(L);
5089     __ stx(r1, base, offs);
5090     __ inc(offs, 8);
5091     __ stx(r2, base, offs);
5092     __ inc(offs, 8);
5093   }
5094 
5095   void offs_entry(Label &Lbl0, Label &Lbl1)
5096   {
5097     assert(Lbl0.is_bound(), "must be");
5098     assert(Lbl1.is_bound(), "must be");
5099 
5100     int offset = Lbl0.loc_pos() - Lbl1.loc_pos();
5101 
5102     __ emit_data(offset);
5103   }
5104 
5105   /* Generate the actual multiplication kernels for BigInteger vectors:
5106    *
5107    *   1. gen_mult_mpmul(...)
5108    *
5109    *   2. gen_mult_64x64(...)
5110    *
5111    *   3. gen_mult_64x64_unaligned(...)
5112    *
5113    *   4. gen_mult_32x32(...)
5114    */
5115   void gen_mult_mpmul(Register len, Register xptr, Register yptr, Register zptr,
5116                       Label &L_exit)
5117   {
5118     const Register zero = G0;
5119     const Register gxp  = G1;   // Need to use global registers across register windows.
5120     const Register gyp  = G2;
5121     const Register gzp  = G3;
5122     const Register disp = G4;
5123     const Register offs = G5;
5124 
5125     __ mov(xptr, gxp);
5126     __ mov(yptr, gyp);
5127     __ mov(zptr, gzp);
5128 
5129     /* Compute jump vector entry:
5130      *
5131      *   1. mpmul input size (0..31) x 64b
5132      *   2. vector input size in 32b limbs (even number)
5133      *   3. branch entries in reverse order (31..0), using two
5134      *      instructions per entry (2 * 4 bytes).
5135      *
5136      *   displacement = byte_offset(bra_offset(len))
5137      *                = byte_offset((64 - len)/2)
5138      *                = 8 * (64 - len)/2
5139      *                = 4 * (64 - len)
5140      */
5141     Register temp = I5;         // Alright to use input regs. in first batch.
5142 
5143     __ sub(zero, len, temp);
5144     __ add(temp, 64, temp);
5145     __ sllx(temp, 2, disp);     // disp := (64 - len) << 2
5146 
5147     // Dispatch relative current PC, into instruction table below.
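         // 'rdpc' reads the address of the rdpc instruction itself; adding 16 skips
         // the four-instruction rdpc/add/jmp/delay-slot sequence, so the jump lands
         // 'disp' bytes into the load table that follows. The same pattern is used
         // for the other dispatch tables below.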
5148     __ rdpc(temp);
5149     __ add(temp, 16, temp);
5150     __ jmp(temp, disp);
5151     __ delayed()->clr(offs);
5152 
5153     ldd_entry(gxp, offs, F22);
5154     ldd_entry(gxp, offs, F20);
5155     ldd_entry(gxp, offs, F18);
5156     ldd_entry(gxp, offs, F16);
5157     ldd_entry(gxp, offs, F14);
5158     ldd_entry(gxp, offs, F12);
5159     ldd_entry(gxp, offs, F10);
5160     ldd_entry(gxp, offs, F8);
5161     ldd_entry(gxp, offs, F6);
5162     ldd_entry(gxp, offs, F4);
5163     ldx_entry(gxp, offs, I5);
5164     ldx_entry(gxp, offs, I4);
5165     ldx_entry(gxp, offs, I3);
5166     ldx_entry(gxp, offs, I2);
5167     ldx_entry(gxp, offs, I1);
5168     ldx_entry(gxp, offs, I0);
5169     ldx_entry(gxp, offs, L7);
5170     ldx_entry(gxp, offs, L6);
5171     ldx_entry(gxp, offs, L5);
5172     ldx_entry(gxp, offs, L4);
5173     ldx_entry(gxp, offs, L3);
5174     ldx_entry(gxp, offs, L2);
5175     ldx_entry(gxp, offs, L1);
5176     ldx_entry(gxp, offs, L0);
5177     ldd_entry(gxp, offs, F2);
5178     ldd_entry(gxp, offs, F0);
5179     ldx_entry(gxp, offs, O5);
5180     ldx_entry(gxp, offs, O4);
5181     ldx_entry(gxp, offs, O3);
5182     ldx_entry(gxp, offs, O2);
5183     ldx_entry(gxp, offs, O1);
5184     ldx_entry(gxp, offs, O0);
5185 
5186     __ save(SP, -176, SP);
5187 
5188     const Register addr = gxp;  // Alright to reuse 'gxp'.
5189 
5190     // Dispatch relative current PC, into instruction table below.
5191     __ rdpc(addr);
5192     __ add(addr, 16, addr);
5193     __ jmp(addr, disp);
5194     __ delayed()->clr(offs);
5195 
5196     ldd_entry(gyp, offs, F58);
5197     ldd_entry(gyp, offs, F56);
5198     ldd_entry(gyp, offs, F54);
5199     ldd_entry(gyp, offs, F52);
5200     ldd_entry(gyp, offs, F50);
5201     ldd_entry(gyp, offs, F48);
5202     ldd_entry(gyp, offs, F46);
5203     ldd_entry(gyp, offs, F44);
5204     ldd_entry(gyp, offs, F42);
5205     ldd_entry(gyp, offs, F40);
5206     ldd_entry(gyp, offs, F38);
5207     ldd_entry(gyp, offs, F36);
5208     ldd_entry(gyp, offs, F34);
5209     ldd_entry(gyp, offs, F32);
5210     ldd_entry(gyp, offs, F30);
5211     ldd_entry(gyp, offs, F28);
5212     ldd_entry(gyp, offs, F26);
5213     ldd_entry(gyp, offs, F24);
5214     ldx_entry(gyp, offs, O5);
5215     ldx_entry(gyp, offs, O4);
5216     ldx_entry(gyp, offs, O3);
5217     ldx_entry(gyp, offs, O2);
5218     ldx_entry(gyp, offs, O1);
5219     ldx_entry(gyp, offs, O0);
5220     ldx_entry(gyp, offs, L7);
5221     ldx_entry(gyp, offs, L6);
5222     ldx_entry(gyp, offs, L5);
5223     ldx_entry(gyp, offs, L4);
5224     ldx_entry(gyp, offs, L3);
5225     ldx_entry(gyp, offs, L2);
5226     ldx_entry(gyp, offs, L1);
5227     ldx_entry(gyp, offs, L0);
5228 
5229     __ save(SP, -176, SP);
5230     __ save(SP, -176, SP);
5231     __ save(SP, -176, SP);
5232     __ save(SP, -176, SP);
5233     __ save(SP, -176, SP);
5234 
5235     Label L_mpmul_restore_4, L_mpmul_restore_3, L_mpmul_restore_2;
5236     Label L_mpmul_restore_1, L_mpmul_restore_0;
5237 
5238     // Dispatch relative current PC, into instruction table below.
5239     __ rdpc(addr);
5240     __ add(addr, 16, addr);
5241     __ jmp(addr, disp);
5242     __ delayed()->clr(offs);
5243 
5244     mpmul_entry(31, L_mpmul_restore_0);
5245     mpmul_entry(30, L_mpmul_restore_0);
5246     mpmul_entry(29, L_mpmul_restore_0);
5247     mpmul_entry(28, L_mpmul_restore_0);
5248     mpmul_entry(27, L_mpmul_restore_1);
5249     mpmul_entry(26, L_mpmul_restore_1);
5250     mpmul_entry(25, L_mpmul_restore_1);
5251     mpmul_entry(24, L_mpmul_restore_1);
5252     mpmul_entry(23, L_mpmul_restore_1);
5253     mpmul_entry(22, L_mpmul_restore_1);
5254     mpmul_entry(21, L_mpmul_restore_1);
5255     mpmul_entry(20, L_mpmul_restore_2);
5256     mpmul_entry(19, L_mpmul_restore_2);
5257     mpmul_entry(18, L_mpmul_restore_2);
5258     mpmul_entry(17, L_mpmul_restore_2);
5259     mpmul_entry(16, L_mpmul_restore_2);
5260     mpmul_entry(15, L_mpmul_restore_2);
5261     mpmul_entry(14, L_mpmul_restore_2);
5262     mpmul_entry(13, L_mpmul_restore_3);
5263     mpmul_entry(12, L_mpmul_restore_3);
5264     mpmul_entry(11, L_mpmul_restore_3);
5265     mpmul_entry(10, L_mpmul_restore_3);
5266     mpmul_entry( 9, L_mpmul_restore_3);
5267     mpmul_entry( 8, L_mpmul_restore_3);
5268     mpmul_entry( 7, L_mpmul_restore_3);
5269     mpmul_entry( 6, L_mpmul_restore_4);
5270     mpmul_entry( 5, L_mpmul_restore_4);
5271     mpmul_entry( 4, L_mpmul_restore_4);
5272     mpmul_entry( 3, L_mpmul_restore_4);
5273     mpmul_entry( 2, L_mpmul_restore_4);
5274     mpmul_entry( 1, L_mpmul_restore_4);
5275     mpmul_entry( 0, L_mpmul_restore_4);
5276 
5277     Label L_z31, L_z30, L_z29, L_z28, L_z27, L_z26, L_z25, L_z24;
5278     Label L_z23, L_z22, L_z21, L_z20, L_z19, L_z18, L_z17, L_z16;
5279     Label L_z15, L_z14, L_z13, L_z12, L_z11, L_z10, L_z09, L_z08;
5280     Label L_z07, L_z06, L_z05, L_z04, L_z03, L_z02, L_z01, L_z00;
5281 
5282     Label L_zst_base;    // Store sequence base address.
5283     __ bind(L_zst_base);
5284 
5285     stx_entry(L_z31, L7, L6, gzp, offs);
5286     stx_entry(L_z30, L5, L4, gzp, offs);
5287     stx_entry(L_z29, L3, L2, gzp, offs);
5288     stx_entry(L_z28, L1, L0, gzp, offs);
5289     __ restore();
5290     stx_entry(L_z27, O5, O4, gzp, offs);
5291     stx_entry(L_z26, O3, O2, gzp, offs);
5292     stx_entry(L_z25, O1, O0, gzp, offs);
5293     stx_entry(L_z24, L7, L6, gzp, offs);
5294     stx_entry(L_z23, L5, L4, gzp, offs);
5295     stx_entry(L_z22, L3, L2, gzp, offs);
5296     stx_entry(L_z21, L1, L0, gzp, offs);
5297     __ restore();
5298     stx_entry(L_z20, O5, O4, gzp, offs);
5299     stx_entry(L_z19, O3, O2, gzp, offs);
5300     stx_entry(L_z18, O1, O0, gzp, offs);
5301     stx_entry(L_z17, L7, L6, gzp, offs);
5302     stx_entry(L_z16, L5, L4, gzp, offs);
5303     stx_entry(L_z15, L3, L2, gzp, offs);
5304     stx_entry(L_z14, L1, L0, gzp, offs);
5305     __ restore();
5306     stx_entry(L_z13, O5, O4, gzp, offs);
5307     stx_entry(L_z12, O3, O2, gzp, offs);
5308     stx_entry(L_z11, O1, O0, gzp, offs);
5309     stx_entry(L_z10, L7, L6, gzp, offs);
5310     stx_entry(L_z09, L5, L4, gzp, offs);
5311     stx_entry(L_z08, L3, L2, gzp, offs);
5312     stx_entry(L_z07, L1, L0, gzp, offs);
5313     __ restore();
5314     stx_entry(L_z06, O5, O4, gzp, offs);
5315     stx_entry(L_z05, O3, O2, gzp, offs);
5316     stx_entry(L_z04, O1, O0, gzp, offs);
5317     stx_entry(L_z03, L7, L6, gzp, offs);
5318     stx_entry(L_z02, L5, L4, gzp, offs);
5319     stx_entry(L_z01, L3, L2, gzp, offs);
5320     stx_entry(L_z00, L1, L0, gzp, offs);
5321 
5322     __ restore();
5323     __ restore();
5324     // Exit out of 'mpmul' routine, back to multiplyToLen.
5325     __ ba_short(L_exit);
5326 
5327     Label L_zst_offs;
5328     __ bind(L_zst_offs);
5329 
5330     offs_entry(L_z31, L_zst_base);  // index 31: 2048x2048
5331     offs_entry(L_z30, L_zst_base);
5332     offs_entry(L_z29, L_zst_base);
5333     offs_entry(L_z28, L_zst_base);
5334     offs_entry(L_z27, L_zst_base);
5335     offs_entry(L_z26, L_zst_base);
5336     offs_entry(L_z25, L_zst_base);
5337     offs_entry(L_z24, L_zst_base);
5338     offs_entry(L_z23, L_zst_base);
5339     offs_entry(L_z22, L_zst_base);
5340     offs_entry(L_z21, L_zst_base);
5341     offs_entry(L_z20, L_zst_base);
5342     offs_entry(L_z19, L_zst_base);
5343     offs_entry(L_z18, L_zst_base);
5344     offs_entry(L_z17, L_zst_base);
5345     offs_entry(L_z16, L_zst_base);
5346     offs_entry(L_z15, L_zst_base);
5347     offs_entry(L_z14, L_zst_base);
5348     offs_entry(L_z13, L_zst_base);
5349     offs_entry(L_z12, L_zst_base);
5350     offs_entry(L_z11, L_zst_base);
5351     offs_entry(L_z10, L_zst_base);
5352     offs_entry(L_z09, L_zst_base);
5353     offs_entry(L_z08, L_zst_base);
5354     offs_entry(L_z07, L_zst_base);
5355     offs_entry(L_z06, L_zst_base);
5356     offs_entry(L_z05, L_zst_base);
5357     offs_entry(L_z04, L_zst_base);
5358     offs_entry(L_z03, L_zst_base);
5359     offs_entry(L_z02, L_zst_base);
5360     offs_entry(L_z01, L_zst_base);
5361     offs_entry(L_z00, L_zst_base);  // index  0:   64x64
5362 
5363     __ bind(L_mpmul_restore_4);
5364     __ restore();
5365     __ bind(L_mpmul_restore_3);
5366     __ restore();
5367     __ bind(L_mpmul_restore_2);
5368     __ restore();
5369     __ bind(L_mpmul_restore_1);
5370     __ restore();
5371     __ bind(L_mpmul_restore_0);
5372 
5373     // Dispatch via offset vector entry, into z-store sequence.
5374     Label L_zst_rdpc;
5375     __ bind(L_zst_rdpc);
5376 
5377     assert(L_zst_base.is_bound(), "must be");
5378     assert(L_zst_offs.is_bound(), "must be");
5379     assert(L_zst_rdpc.is_bound(), "must be");
5380 
5381     int dbase = L_zst_rdpc.loc_pos() - L_zst_base.loc_pos();
5382     int doffs = L_zst_rdpc.loc_pos() - L_zst_offs.loc_pos();
5383 
5384     temp = gyp;   // Alright to reuse 'gyp'.
5385 
5386     __ rdpc(addr);
5387     __ sub(addr, doffs, temp);
5388     __ srlx(disp, 1, disp);
5389     __ lduw(temp, disp, offs);
5390     __ sub(addr, dbase, temp);
5391     __ jmp(temp, offs);
5392     __ delayed()->clr(offs);
5393   }
5394 
5395   void gen_mult_64x64(Register xp, Register xn,
5396                       Register yp, Register yn,
5397                       Register zp, Register zn, Label &L_exit)
5398   {
5399     // Assuming that a stack frame has already been created, i.e. local and
5400     // output registers are available for immediate use.
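         //
         // For reference, a rough (not compiled) sketch of what the generated code
         // computes, with x, y and z viewed as big-endian arrays of u64 limbs and
         // xn/yn/zn as their highest indices (after the adjustments below):
         //
         //   u64 c = 0;
         //   for (int i = xn, k = zn; i >= 0; i--, k--) {   // first row: x * y[yn]
         //     u128 p = (u128)x[i] * y[yn] + c;
         //     z[k] = lo64(p);  c = hi64(p);
         //   }
         //   z[k] = c;
         //   for (int j = yn - 1; j >= 0; j--) {            // remaining rows, accumulated into z
         //     c = 0;
         //     for (int i = xn, k = --zn; i >= 0; i--, k--) {
         //       u128 p = (u128)x[i] * y[j] + z[k] + c;
         //       z[k] = lo64(p);  c = hi64(p);
         //     }
         //     z[k] = c;
         //   }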
5401 
5402     const Register ri = L0;     // Outer loop index, xv[i]
5403     const Register rj = L1;     // Inner loop index, yv[j]
5404     const Register rk = L2;     // Output loop index, zv[k]
5405     const Register rx = L4;     // x-vector datum [i]
5406     const Register ry = L5;     // y-vector datum [j]
5407     const Register rz = L6;     // z-vector datum [k]
5408     const Register rc = L7;     // carry over (to z-vector datum [k-1])
5409 
5410     const Register lop = O0;    // lo-64b product
5411     const Register hip = O1;    // hi-64b product
5412 
5413     const Register zero = G0;
5414 
5415     Label L_loop_i,  L_exit_loop_i;
5416     Label L_loop_j;
5417     Label L_loop_i2, L_exit_loop_i2;
5418 
5419     __ srlx(xn, 1, xn);         // index for u32 to u64 ditto
5420     __ srlx(yn, 1, yn);         // index for u32 to u64 ditto
5421     __ srlx(zn, 1, zn);         // index for u32 to u64 ditto
5422     __ dec(xn);                 // Adjust [0..(N/2)-1]
5423     __ dec(yn);
5424     __ dec(zn);
5425     __ clr(rc);                 // u64 c = 0
5426     __ sllx(xn, 3, ri);         // int i = xn (byte offset i = 8*xn)
5427     __ sllx(yn, 3, rj);         // int j = yn (byte offset j = 8*yn)
5428     __ sllx(zn, 3, rk);         // int k = zn (byte offset k = 8*zn)
5429     __ ldx(yp, rj, ry);         // u64 y = yp[yn]
5430 
5431     // for (int i = xn; i >= 0; i--)
5432     __ bind(L_loop_i);
5433 
5434     __ cmp_and_br_short(ri, 0,  // i >= 0
5435                         Assembler::less, Assembler::pn, L_exit_loop_i);
5436     __ ldx(xp, ri, rx);         // x = xp[i]
5437     __ mulx(rx, ry, lop);       // lo-64b-part of result 64x64
5438     __ umulxhi(rx, ry, hip);    // hi-64b-part of result 64x64
5439     __ addcc(rc, lop, lop);     // Accumulate lower order bits (producing carry)
5440     __ addxc(hip, zero, rc);    // carry over to next datum [k-1]
5441     __ stx(lop, zp, rk);        // z[k] = lop
5442     __ dec(rk, 8);              // k--
5443     __ dec(ri, 8);              // i--
5444     __ ba_short(L_loop_i);
5445 
5446     __ bind(L_exit_loop_i);
5447     __ stx(rc, zp, rk);         // z[k] = c
5448 
5449     // for (int j = yn - 1; j >= 0; j--)
5450     __ sllx(yn, 3, rj);         // int j = yn - 1 (byte offset j = 8*yn)
5451     __ dec(rj, 8);
5452 
5453     __ bind(L_loop_j);
5454 
5455     __ cmp_and_br_short(rj, 0,  // j >= 0
5456                         Assembler::less, Assembler::pn, L_exit);
5457     __ clr(rc);                 // u64 c = 0
5458     __ ldx(yp, rj, ry);         // u64 y = yp[j]
5459 
5460     // for (int i = xn, k = --zn; i >= 0; i--)
5461     __ dec(zn);                 // --zn
5462     __ sllx(xn, 3, ri);         // int i = xn (byte offset i = 8*xn)
5463     __ sllx(zn, 3, rk);         // int k = zn (byte offset k = 8*zn)
5464 
5465     __ bind(L_loop_i2);
5466 
5467     __ cmp_and_br_short(ri, 0,  // i >= 0
5468                         Assembler::less, Assembler::pn, L_exit_loop_i2);
5469     __ ldx(xp, ri, rx);         // x = xp[i]
5470     __ ldx(zp, rk, rz);         // z = zp[k], accumulator
5471     __ mulx(rx, ry, lop);       // lo-64b-part of result 64x64
5472     __ umulxhi(rx, ry, hip);    // hi-64b-part of result 64x64
5473     __ addcc(rz, rc, rz);       // Accumulate lower order bits,
5474     __ addxc(hip, zero, rc);    // Accumulate higher order bits to carry
5475     __ addcc(rz, lop, rz);      //    z += lo(p) + c
5476     __ addxc(rc, zero, rc);
5477     __ stx(rz, zp, rk);         // zp[k] = z
5478     __ dec(rk, 8);              // k--
5479     __ dec(ri, 8);              // i--
5480     __ ba_short(L_loop_i2);
5481 
5482     __ bind(L_exit_loop_i2);
5483     __ stx(rc, zp, rk);         // z[k] = c
5484     __ dec(rj, 8);              // j--
5485     __ ba_short(L_loop_j);
5486   }
5487 
5488   void gen_mult_64x64_unaligned(Register xp, Register xn,
5489                                 Register yp, Register yn,
5490                                 Register zp, Register zn, Label &L_exit)
5491   {
5492     // Assuming that a stack frame has already been created, i.e. local and
5493     // output registers are available for use.
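         //
         // Same schoolbook algorithm as gen_mult_64x64 above, except that x, y and z
         // may only be 32-bit aligned: each 64-bit limb is assembled from two 'lduw'
         // loads (sllx/or3) and written back as two 'stw' stores.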
5494 
5495     const Register xpc = L0;    // Outer loop cursor, xp[i]
5496     const Register ypc = L1;    // Inner loop cursor, yp[j]
5497     const Register zpc = L2;    // Output loop cursor, zp[k]
5498     const Register rx  = L4;    // x-vector datum [i]
5499     const Register ry  = L5;    // y-vector datum [j]
5500     const Register rz  = L6;    // z-vector datum [k]
5501     const Register rc  = L7;    // carry over (to z-vector datum [k-1])
5502     const Register rt  = O2;
5503 
5504     const Register lop = O0;    // lo-64b product
5505     const Register hip = O1;    // hi-64b product
5506 
5507     const Register zero = G0;
5508 
5509     Label L_loop_i,  L_exit_loop_i;
5510     Label L_loop_j;
5511     Label L_loop_i2, L_exit_loop_i2;
5512 
5513     __ srlx(xn, 1, xn);         // index for u32 to u64 ditto
5514     __ srlx(yn, 1, yn);         // index for u32 to u64 ditto
5515     __ srlx(zn, 1, zn);         // index for u32 to u64 ditto
5516     __ dec(xn);                 // Adjust [0..(N/2)-1]
5517     __ dec(yn);
5518     __ dec(zn);
5519     __ clr(rc);                 // u64 c = 0
5520     __ sllx(xn, 3, xpc);        // u32* xpc = &xp[xn] (byte offset 8*xn)
5521     __ add(xp, xpc, xpc);
5522     __ sllx(yn, 3, ypc);        // u32* ypc = &yp[yn] (byte offset 8*yn)
5523     __ add(yp, ypc, ypc);
5524     __ sllx(zn, 3, zpc);        // u32* zpc = &zp[zn] (byte offset 8*zn)
5525     __ add(zp, zpc, zpc);
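    // Assemble a u64 operand from two u32 loads: SPARC is big-endian, so the
    // high half lives at offset 0 and the low half at offset 4; lduw
    // zero-extends and the sllx/or3 pair splices the halves together.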
5526     __ lduw(ypc, 0, rt);        // u64 y = yp[yn]
5527     __ lduw(ypc, 4, ry);        //   ...
5528     __ sllx(rt, 32, rt);
5529     __ or3(rt, ry, ry);
5530 
5531     // for (int i = xn; i >= 0; i--)
5532     __ bind(L_loop_i);
5533 
5534     __ cmp_and_brx_short(xpc, xp,// i >= 0
5535                          Assembler::lessUnsigned, Assembler::pn, L_exit_loop_i);
5536     __ lduw(xpc, 0, rt);        // u64 x = xp[i]
5537     __ lduw(xpc, 4, rx);        //   ...
5538     __ sllx(rt, 32, rt);
5539     __ or3(rt, rx, rx);
5540     __ mulx(rx, ry, lop);       // lo-64b-part of result 64x64
5541     __ umulxhi(rx, ry, hip);    // hi-64b-part of result 64x64
5542     __ addcc(rc, lop, lop);     // Accumulate lower order bits (producing carry)
5543     __ addxc(hip, zero, rc);    // carry over to next datum [k-1]
5544     __ srlx(lop, 32, rt);
5545     __ stw(rt, zpc, 0);         // z[k] = lop
5546     __ stw(lop, zpc, 4);        //   ...
5547     __ dec(zpc, 8);             // k-- (zpc--)
5548     __ dec(xpc, 8);             // i-- (xpc--)
5549     __ ba_short(L_loop_i);
5550 
5551     __ bind(L_exit_loop_i);
5552     __ srlx(rc, 32, rt);
5553     __ stw(rt, zpc, 0);         // z[k] = c
5554     __ stw(rc, zpc, 4);
5555 
5556     // for (int j = yn - 1; j >= 0; j--)
5557     __ sllx(yn, 3, ypc);        // u32* ypc = &yp[yn] (byte offset 8*yn)
5558     __ add(yp, ypc, ypc);
5559     __ dec(ypc, 8);             // yn - 1 (ypc--)
5560 
5561     __ bind(L_loop_j);
5562 
5563     __ cmp_and_brx_short(ypc, yp,// j >= 0
5564                          Assembler::lessUnsigned, Assembler::pn, L_exit);
5565     __ clr(rc);                 // u64 c = 0
5566     __ lduw(ypc, 0, rt);        // u64 y = yp[j] (= *ypc)
5567     __ lduw(ypc, 4, ry);        //   ...
5568     __ sllx(rt, 32, rt);
5569     __ or3(rt, ry, ry);
5570 
5571     // for (int i = xn, k = --zn; i >= 0; i--)
5572     __ sllx(xn, 3, xpc);        // u32* xpc = &xp[xn] (byte offset 8*xn)
5573     __ add(xp, xpc, xpc);
5574     __ dec(zn);                 // --zn
5575     __ sllx(zn, 3, zpc);        // u32* zpc = &zp[zn] (byte offset 8*zn)
5576     __ add(zp, zpc, zpc);
5577 
5578     __ bind(L_loop_i2);
5579 
5580     __ cmp_and_brx_short(xpc, xp,// i >= 0
5581                          Assembler::lessUnsigned, Assembler::pn, L_exit_loop_i2);
5582     __ lduw(xpc, 0, rt);        // u64 x = xp[i] (= *xpc)
5583     __ lduw(xpc, 4, rx);        //   ...
5584     __ sllx(rt, 32, rt);
5585     __ or3(rt, rx, rx);
5586 
5587     __ lduw(zpc, 0, rt);        // u64 z = zp[k] (= *zpc)
5588     __ lduw(zpc, 4, rz);        //   ...
5589     __ sllx(rt, 32, rt);
5590     __ or3(rt, rz, rz);
5591 
5592     __ mulx(rx, ry, lop);       // lo-64b-part of result 64x64
5593     __ umulxhi(rx, ry, hip);    // hi-64b-part of result 64x64
5594     __ addcc(rz, rc, rz);       // Accumulate lower order bits...
5595     __ addxc(hip, zero, rc);    // Accumulate higher order bits to carry
5596     __ addcc(rz, lop, rz);      // ... z += lo(p) + c
5597     __ addxccc(rc, zero, rc);
5598     __ srlx(rz, 32, rt);
5599     __ stw(rt, zpc, 0);         // zp[k] = z    (*zpc = z)
5600     __ stw(rz, zpc, 4);
5601     __ dec(zpc, 8);             // k-- (zpc--)
5602     __ dec(xpc, 8);             // i-- (xpc--)
5603     __ ba_short(L_loop_i2);
5604 
5605     __ bind(L_exit_loop_i2);
5606     __ srlx(rc, 32, rt);
5607     __ stw(rt, zpc, 0);         // z[k] = c
5608     __ stw(rc, zpc, 4);
5609     __ dec(ypc, 8);             // j-- (ypc--)
5610     __ ba_short(L_loop_j);
5611   }
5612 
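  // Illustrative C-level sketch of the multiply generated by gen_mult_32x32
  // below (not part of the emitted stub; x/y/z stand for xp/yp/zp, and xn, yn,
  // zn denote the last valid indices after the initial decrements, whereas the
  // generated code keeps them scaled as byte offsets in ri/rj/rk):
  //
  //   u64 c = 0;
  //   int k = zn;
  //   for (int i = xn; i >= 0; i--, k--) {    // first row: z = x * y[yn]
  //     u64 p = (u64)x[i] * y[yn] + c;
  //     z[k] = (u32)p;  c = p >> 32;
  //   }
  //   z[k] = (u32)c;
  //   for (int j = yn - 1; j >= 0; j--) {     // remaining rows accumulate
  //     c = 0;
  //     k = --zn;
  //     for (int i = xn; i >= 0; i--, k--) {
  //       u64 p = (u64)x[i] * y[j] + z[k] + c;
  //       z[k] = (u32)p;  c = p >> 32;
  //     }
  //     z[k] = (u32)c;
  //   }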
5613   void gen_mult_32x32(Register xp, Register xn,
5614                       Register yp, Register yn,
5615                       Register zp, Register zn, Label &L_exit)
5616   {
5617     // Assuming that a stack frame has already been created, i.e. local and
5618     // output registers are available for use.
5619 
5620     const Register ri = L0;     // Outer loop index, xv[i]
5621     const Register rj = L1;     // Inner loop index, yv[j]
5622     const Register rk = L2;     // Output loop index, zv[k]
5623     const Register rx = L4;     // x-vector datum [i]
5624     const Register ry = L5;     // y-vector datum [j]
5625     const Register rz = L6;     // z-vector datum [k]
5626     const Register rc = L7;     // carry over (to z-vector datum [k-1])
5627 
5628     const Register p64 = O0;    // 64b product
5629     const Register z65 = O1;    // carry+64b accumulator
5630     const Register c65 = O2;    // carry at bit 65
5631     const Register c33 = O2;    // carry at bit 33 (after shift)
5632 
5633     const Register zero = G0;
5634 
5635     Label L_loop_i,  L_exit_loop_i;
5636     Label L_loop_j;
5637     Label L_loop_i2, L_exit_loop_i2;
5638 
5639     __ dec(xn);                 // Adjust [0..N-1]
5640     __ dec(yn);
5641     __ dec(zn);
5642     __ clr(rc);                 // u32 c = 0
5643     __ sllx(xn, 2, ri);         // int i = xn (byte offset i = 4*xn)
5644     __ sllx(yn, 2, rj);         // int j = yn (byte offset j = 4*yn)
5645     __ sllx(zn, 2, rk);         // int k = zn (byte offset k = 4*zn)
5646     __ lduw(yp, rj, ry);        // u32 y = yp[yn]
5647 
5648     // for (int i = xn; i >= 0; i--)
5649     __ bind(L_loop_i);
5650 
5651     __ cmp_and_br_short(ri, 0,  // i >= 0
5652                         Assembler::less, Assembler::pn, L_exit_loop_i);
5653     __ lduw(xp, ri, rx);        // x = xp[i]
5654     __ mulx(rx, ry, p64);       // 64b result of 32x32
5655     __ addcc(rc, p64, z65);     // Accumulate to 65 bits (producing carry)
5656     __ addxc(zero, zero, c65);  // Materialise carry (in bit 65) into lsb,
5657     __ sllx(c65, 32, c33);      // and shift into bit 33
5658     __ srlx(z65, 32, rc);       // carry = c33 | hi(z65) >> 32
5659     __ add(c33, rc, rc);        // carry over to next datum [k-1]
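    // hi(z65) fits in 32 bits and c33 has at most bit 32 set, so the add above
    // acts as an OR; rc now holds carry = (c + x*y) >> 32 (at most 33 bits),
    // ready to be folded into column [k-1].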
5660     __ stw(z65, zp, rk);        // z[k] = lo(z65)
5661     __ dec(rk, 4);              // k--
5662     __ dec(ri, 4);              // i--
5663     __ ba_short(L_loop_i);
5664 
5665     __ bind(L_exit_loop_i);
5666     __ stw(rc, zp, rk);         // z[k] = c
5667 
5668     // for (int j = yn - 1; j >= 0; j--)
5669     __ sllx(yn, 2, rj);         // int j = yn - 1 (byte offset j = 4*yn)
5670     __ dec(rj, 4);
5671 
5672     __ bind(L_loop_j);
5673 
5674     __ cmp_and_br_short(rj, 0,  // j >= 0
5675                         Assembler::less, Assembler::pn, L_exit);
5676     __ clr(rc);                 // u32 c = 0
5677     __ lduw(yp, rj, ry);        // u32 y = yp[j]
5678 
5679     // for (int i = xn, k = --zn; i >= 0; i--)
5680     __ dec(zn);                 // --zn
5681     __ sllx(xn, 2, ri);         // int i = xn (byte offset i = 4*xn)
5682     __ sllx(zn, 2, rk);         // int k = zn (byte offset k = 4*zn)
5683 
5684     __ bind(L_loop_i2);
5685 
5686     __ cmp_and_br_short(ri, 0,  // i >= 0
5687                         Assembler::less, Assembler::pn, L_exit_loop_i2);
5688     __ lduw(xp, ri, rx);        // x = xp[i]
5689     __ lduw(zp, rk, rz);        // z = zp[k], accumulator
5690     __ mulx(rx, ry, p64);       // 64b result of 32x32
5691     __ add(rz, rc, rz);         // Accumulate z + c (no 64-bit carry possible),
5692     __ addcc(rz, p64, z65);     //   then z65 = z + c + p64 (65-bit sum)
5693     __ addxc(zero, zero, c65);  // Materialise carry (in bit 65) into lsb,
5694     __ sllx(c65, 32, c33);      // and shift into bit 33
5695     __ srlx(z65, 32, rc);       // carry = c33 | hi(z65) >> 32
5696     __ add(c33, rc, rc);        // carry over to next datum [k-1]
5697     __ stw(z65, zp, rk);        // zp[k] = lo(z65)
5698     __ dec(rk, 4);              // k--
5699     __ dec(ri, 4);              // i--
5700     __ ba_short(L_loop_i2);
5701 
5702     __ bind(L_exit_loop_i2);
5703     __ stw(rc, zp, rk);         // z[k] = c
5704     __ dec(rj, 4);              // j--
5705     __ ba_short(L_loop_j);
5706   }
5707 
5708 
5709   void generate_initial() {
5710     // Generates the initial stubs and initializes the entry points
5711 
5712     //------------------------------------------------------------------------------------------------------------------------
5713     // entry points that exist in all platforms
5714     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
5715     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
5716     StubRoutines::_forward_exception_entry                 = generate_forward_exception();
5717 
5718     StubRoutines::_call_stub_entry                         = generate_call_stub(StubRoutines::_call_stub_return_address);
5719     StubRoutines::_catch_exception_entry                   = generate_catch_exception();
5720 
5721     //------------------------------------------------------------------------------------------------------------------------
5722     // entry points that are platform specific
5723     StubRoutines::Sparc::_test_stop_entry                  = generate_test_stop();
5724 
5725     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
5726     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
5727 
5728     // Build this early so it's available for the interpreter.
5729     StubRoutines::_throw_StackOverflowError_entry =
5730             generate_throw_exception("StackOverflowError throw_exception",
5731             CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
5732     StubRoutines::_throw_delayed_StackOverflowError_entry =
5733             generate_throw_exception("delayed StackOverflowError throw_exception",
5734             CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
5735 
5736     if (UseCRC32Intrinsics) {
5737       // Set the table address before generating the stubs that use it
5738       StubRoutines::_crc_table_adr = (address)StubRoutines::Sparc::_crc_table;
5739       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
5740     }
5741 
5742     if (UseCRC32CIntrinsics) {
5743       // Set the table address before generating the stubs that use it
5744       StubRoutines::_crc32c_table_addr = (address)StubRoutines::Sparc::_crc32c_table;
5745       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
5746     }
5747 
5748     // Safefetch stubs.
5749     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
5750                                                        &StubRoutines::_safefetch32_fault_pc,
5751                                                        &StubRoutines::_safefetch32_continuation_pc);
5752     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
5753                                                        &StubRoutines::_safefetchN_fault_pc,
5754                                                        &StubRoutines::_safefetchN_continuation_pc);
5755   }
5756 
5757 
5758   void generate_all() {
5759     // Generates all stubs and initializes the entry points
5760 
5761     // Generate partial_subtype_check first here since its code depends on
5762     // UseZeroBaseCompressedOops which is defined after heap initialization.
5763     StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
5764     // These entry points require SharedInfo::stack0 to be set up in non-core builds
5765     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
5766     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
5767     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
5768 
5769     // support for verify_oop (must happen after universe_init)
5770     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop_subroutine();
5771 
5772     // arraycopy stubs used by compilers
5773     generate_arraycopy_stubs();
5774 
5775     // Don't initialize the platform math functions since sparc
5776     // doesn't have intrinsics for these operations.
5777 
5778     // generate AES intrinsics code
5779     if (UseAESIntrinsics) {
5780       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
5781       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
5782       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
5783       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
5784     }
5785     // generate GHASH intrinsics code
5786     if (UseGHASHIntrinsics) {
5787       StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
5788     }
5789 
5790     // generate SHA1/SHA256/SHA512 intrinsics code
5791     if (UseSHA1Intrinsics) {
5792       StubRoutines::_sha1_implCompress     = generate_sha1_implCompress(false,   "sha1_implCompress");
5793       StubRoutines::_sha1_implCompressMB   = generate_sha1_implCompress(true,    "sha1_implCompressMB");
5794     }
5795     if (UseSHA256Intrinsics) {
5796       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
5797       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
5798     }
5799     if (UseSHA512Intrinsics) {
5800       StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
5801       StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
5802     }
5803     // generate Adler32 intrinsics code
5804     if (UseAdler32Intrinsics) {
5805       StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
5806     }
5807 
5808 #ifdef COMPILER2
5809     // Intrinsics supported by C2 only:
5810     if (UseMultiplyToLenIntrinsic) {
5811       StubRoutines::_multiplyToLen = generate_multiplyToLen();
5812     }
5813 #endif // COMPILER2
5814   }
5815 
5816  public:
5817   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
5818     // replace the standard masm with a special one:
5819     _masm = new MacroAssembler(code);
5820 
5821     _stub_count = !all ? 0x100 : 0x200;
5822     if (all) {
5823       generate_all();
5824     } else {
5825       generate_initial();
5826     }
5827 
5828     // make sure this stub is available for all local calls
5829     if (_atomic_add_stub.is_unbound()) {
5830       // generate a second time, if necessary
5831       (void) generate_atomic_add();
5832     }
5833   }
5834 
5835 
5836  private:
5837   int _stub_count;
5838   void stub_prolog(StubCodeDesc* cdesc) {
5839     # ifdef ASSERT
5840       // put extra information in the stub code, to make it more readable
5841       // Write the high part of the address
5842       // [RGV] Check if there is a dependency on the size of this prolog
5843       __ emit_data((intptr_t)cdesc >> 32,    relocInfo::none);
5844       __ emit_data((intptr_t)cdesc,    relocInfo::none);
5845       __ emit_data(++_stub_count, relocInfo::none);
5846     # endif
5847     align(true);
5848   }
5849 
5850   void align(bool at_header = false) {
5851     // %%%%% move this constant somewhere else
5852     // UltraSPARC cache line size is 8 instructions:
5853     const unsigned int icache_line_size = 32;
5854     const unsigned int icache_half_line_size = 16;
5855 
5856     if (at_header) {
5857       while ((intptr_t)(__ pc()) % icache_line_size != 0) {
5858         __ emit_data(0, relocInfo::none);
5859       }
5860     } else {
5861       while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
5862         __ nop();
5863       }
5864     }
5865   }
5866 
5867 }; // end class declaration
5868 
5869 #define UCM_TABLE_MAX_ENTRIES 8
5870 void StubGenerator_generate(CodeBuffer* code, bool all) {
5871   if (UnsafeCopyMemory::_table == NULL) {
5872     UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
5873   }
5874   StubGenerator g(code, all);
5875 }