1 /*
   2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "nativeInst_sparc.hpp"
  29 #include "oops/instanceOop.hpp"
  30 #include "oops/method.hpp"
  31 #include "oops/objArrayKlass.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "prims/methodHandles.hpp"
  34 #include "runtime/frame.inline.hpp"
  35 #include "runtime/handles.inline.hpp"
  36 #include "runtime/sharedRuntime.hpp"
  37 #include "runtime/stubCodeGenerator.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "runtime/thread.inline.hpp"
  40 #ifdef COMPILER2
  41 #include "opto/runtime.hpp"
  42 #endif
  43 
  44 // Declaration and definition of StubGenerator (no .hpp file).
  45 // For a more detailed description of the stub routine structure
  46 // see the comment in stubRoutines.hpp.
  47 
  48 #define __ _masm->
  49 
  50 #ifdef PRODUCT
  51 #define BLOCK_COMMENT(str) /* nothing */
  52 #else
  53 #define BLOCK_COMMENT(str) __ block_comment(str)
  54 #endif
  55 
  56 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  57 
  58 // Note:  The register L7 is used as L7_thread_cache, and may not be used
  59 //        any other way within this module.
  60 
  61 
  62 static const Register& Lstub_temp = L2;
  63 
  64 // -------------------------------------------------------------------------------------------------------------------------
  65 // Stub Code definitions
  66 
  67 class StubGenerator: public StubCodeGenerator {
  68  private:
  69 
  70 #ifdef PRODUCT
  71 #define inc_counter_np(a,b,c)
  72 #else
  73 #define inc_counter_np(counter, t1, t2) \
  74   BLOCK_COMMENT("inc_counter " #counter); \
  75   __ inc_counter(&counter, t1, t2);
  76 #endif
  77 
  78   //----------------------------------------------------------------------------------------------------
  79   // Call stubs are used to call Java from C
  80 
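  // A rough sketch of the C++ call into this stub (the authoritative signature
  // is the CallStub typedef in stubRoutines.hpp; argument names here are
  // illustrative only):
  //
  //   StubRoutines::call_stub()(call_wrapper_address, result, result_type,
  //                             method, entry_point, parameters,
  //                             parameter_size, thread);
  //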
  81   address generate_call_stub(address& return_pc) {
  82     StubCodeMark mark(this, "StubRoutines", "call_stub");
  83     address start = __ pc();
  84 
  85     // Incoming arguments:
  86     //
  87     // o0         : call wrapper address
  88     // o1         : result (address)
  89     // o2         : result type
  90     // o3         : method
  91     // o4         : (interpreter) entry point
  92     // o5         : parameters (address)
  93     // [sp + 0x5c]: parameter size (in words)
  94     // [sp + 0x60]: thread
  95     //
  96     // +---------------+ <--- sp + 0
  97     // |               |
  98     // . reg save area .
  99     // |               |
 100     // +---------------+ <--- sp + 0x40
 101     // |               |
 102     // . extra 7 slots .
 103     // |               |
 104     // +---------------+ <--- sp + 0x5c
 105     // |  param. size  |
 106     // +---------------+ <--- sp + 0x60
 107     // |    thread     |
 108     // +---------------+
 109     // |               |
 110 
 111     // note: if the link argument position changes, adjust
 112     //       the code in frame::entry_frame_call_wrapper()
 113 
 114     const Argument link           = Argument(0, false); // used only for GC
 115     const Argument result         = Argument(1, false);
 116     const Argument result_type    = Argument(2, false);
 117     const Argument method         = Argument(3, false);
 118     const Argument entry_point    = Argument(4, false);
 119     const Argument parameters     = Argument(5, false);
 120     const Argument parameter_size = Argument(6, false);
 121     const Argument thread         = Argument(7, false);
 122 
 123     // setup thread register
 124     __ ld_ptr(thread.as_address(), G2_thread);
 125     __ reinit_heapbase();
 126 
 127 #ifdef ASSERT
 128     // make sure we have no pending exceptions
 129     { const Register t = G3_scratch;
 130       Label L;
 131       __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
 132       __ br_null_short(t, Assembler::pt, L);
 133       __ stop("StubRoutines::call_stub: entered with pending exception");
 134       __ bind(L);
 135     }
 136 #endif
 137 
 138     // create activation frame & allocate space for parameters
 139     { const Register t = G3_scratch;
 140       __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
 141       __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
 142       __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
 143       __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
 144       __ neg(t);                                                // negate so it can be used with save
 145       __ save(SP, t, SP);                                       // setup new frame
 146     }
 147 
 148     // +---------------+ <--- sp + 0
 149     // |               |
 150     // . reg save area .
 151     // |               |
 152     // +---------------+ <--- sp + 0x40
 153     // |               |
 154     // . extra 7 slots .
 155     // |               |
 156     // +---------------+ <--- sp + 0x5c
 157     // |  empty slot   |      (only if parameter size is even)
 158     // +---------------+
 159     // |               |
 160     // .  parameters   .
 161     // |               |
 162     // +---------------+ <--- fp + 0
 163     // |               |
 164     // . reg save area .
 165     // |               |
 166     // +---------------+ <--- fp + 0x40
 167     // |               |
 168     // . extra 7 slots .
 169     // |               |
 170     // +---------------+ <--- fp + 0x5c
 171     // |  param. size  |
 172     // +---------------+ <--- fp + 0x60
 173     // |    thread     |
 174     // +---------------+
 175     // |               |
 176 
 177     // pass parameters if any
 178     BLOCK_COMMENT("pass parameters if any");
 179     { const Register src = parameters.as_in().as_register();
 180       const Register dst = Lentry_args;
 181       const Register tmp = G3_scratch;
 182       const Register cnt = G4_scratch;
 183 
 184       // test if any parameters & setup of Lentry_args
 185       Label exit;
 186       __ ld_ptr(parameter_size.as_in().as_address(), cnt);      // parameter counter
 187       __ add( FP, STACK_BIAS, dst );
 188       __ cmp_zero_and_br(Assembler::zero, cnt, exit);
 189       __ delayed()->sub(dst, BytesPerWord, dst);                 // setup Lentry_args
 190 
 191       // copy parameters if any
 192       Label loop;
 193       __ BIND(loop);
 194       // Store parameter value
 195       __ ld_ptr(src, 0, tmp);
 196       __ add(src, BytesPerWord, src);
 197       __ st_ptr(tmp, dst, 0);
 198       __ deccc(cnt);
 199       __ br(Assembler::greater, false, Assembler::pt, loop);
 200       __ delayed()->sub(dst, Interpreter::stackElementSize, dst);
 201 
 202       // done
 203       __ BIND(exit);
 204     }
 205 
 206     // setup parameters, method & call Java function
 207 #ifdef ASSERT
    // layout_activation_impl checks its notion of saved SP against
 209     // this register, so if this changes update it as well.
 210     const Register saved_SP = Lscratch;
 211     __ mov(SP, saved_SP);                               // keep track of SP before call
 212 #endif
 213 
 214     // setup parameters
 215     const Register t = G3_scratch;
 216     __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
 217     __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
 218     __ sub(FP, t, Gargs);                              // setup parameter pointer
 219     __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
 220     __ mov(SP, O5_savedSP);
 221 
 222 
 223     // do the call
 224     //
    // the following registers must be set up:
 226     //
 227     // G2_thread
 228     // G5_method
 229     // Gargs
 230     BLOCK_COMMENT("call Java function");
 231     __ jmpl(entry_point.as_in().as_register(), G0, O7);
 232     __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method
 233 
 234     BLOCK_COMMENT("call_stub_return_address:");
 235     return_pc = __ pc();
 236 
 237     // The callee, if it wasn't interpreted, can return with SP changed so
    // we can no longer assert that SP is unchanged.
 239 
 240     // store result depending on type
 241     // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
 242     //  is treated as T_INT)
 243     { const Register addr = result     .as_in().as_register();
 244       const Register type = result_type.as_in().as_register();
 245       Label is_long, is_float, is_double, is_object, exit;
 246       __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
 247       __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
 248       __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
 249       __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
 250       __ delayed()->nop();
 251 
 252       // store int result
 253       __ st(O0, addr, G0);
 254 
 255       __ BIND(exit);
 256       __ ret();
 257       __ delayed()->restore();
 258 
 259       __ BIND(is_object);
 260       __ ba(exit);
 261       __ delayed()->st_ptr(O0, addr, G0);
 262 
 263       __ BIND(is_float);
 264       __ ba(exit);
 265       __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
 266 
 267       __ BIND(is_double);
 268       __ ba(exit);
 269       __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
 270 
 271       __ BIND(is_long);
 272       __ ba(exit);
 273       __ delayed()->st_long(O0, addr, G0);      // store entire long
 274      }
 275      return start;
 276   }
 277 
 278 
 279   //----------------------------------------------------------------------------------------------------
 280   // Return point for a Java call if there's an exception thrown in Java code.
 281   // The exception is caught and transformed into a pending exception stored in
 282   // JavaThread that can be tested from within the VM.
 283   //
 284   // Oexception: exception oop
 285 
 286   address generate_catch_exception() {
 287     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 288 
 289     address start = __ pc();
 290     // verify that thread corresponds
 291     __ verify_thread();
 292 
 293     const Register& temp_reg = Gtemp;
 294     Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
 295     Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
 296     Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());
 297 
 298     // set pending exception
 299     __ verify_oop(Oexception);
 300     __ st_ptr(Oexception, pending_exception_addr);
 301     __ set((intptr_t)__FILE__, temp_reg);
 302     __ st_ptr(temp_reg, exception_file_offset_addr);
 303     __ set((intptr_t)__LINE__, temp_reg);
 304     __ st(temp_reg, exception_line_offset_addr);
 305 
 306     // complete return to VM
 307     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
 308 
 309     AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
 310     __ jump_to(stub_ret, temp_reg);
 311     __ delayed()->nop();
 312 
 313     return start;
 314   }
 315 
 316 
 317   //----------------------------------------------------------------------------------------------------
 318   // Continuation point for runtime calls returning with a pending exception
 319   // The pending exception check happened in the runtime or native call stub
 320   // The pending exception in Thread is converted into a Java-level exception
 321   //
 322   // Contract with Java-level exception handler: O0 = exception
 323   //                                             O1 = throwing pc
 324 
 325   address generate_forward_exception() {
 326     StubCodeMark mark(this, "StubRoutines", "forward_exception");
 327     address start = __ pc();
 328 
 329     // Upon entry, O7 has the return address returning into Java
 330     // (interpreted or compiled) code; i.e. the return address
 331     // becomes the throwing pc.
 332 
 333     const Register& handler_reg = Gtemp;
 334 
 335     Address exception_addr(G2_thread, Thread::pending_exception_offset());
 336 
 337 #ifdef ASSERT
 338     // make sure that this code is only executed if there is a pending exception
 339     { Label L;
 340       __ ld_ptr(exception_addr, Gtemp);
 341       __ br_notnull_short(Gtemp, Assembler::pt, L);
 342       __ stop("StubRoutines::forward exception: no pending exception (1)");
 343       __ bind(L);
 344     }
 345 #endif
 346 
 347     // compute exception handler into handler_reg
 348     __ get_thread();
 349     __ ld_ptr(exception_addr, Oexception);
 350     __ verify_oop(Oexception);
 351     __ save_frame(0);             // compensates for compiler weakness
 352     __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
 353     BLOCK_COMMENT("call exception_handler_for_return_address");
 354     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
 355     __ mov(O0, handler_reg);
 356     __ restore();                 // compensates for compiler weakness
 357 
 358     __ ld_ptr(exception_addr, Oexception);
 359     __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC
 360 
 361 #ifdef ASSERT
 362     // make sure exception is set
 363     { Label L;
 364       __ br_notnull_short(Oexception, Assembler::pt, L);
 365       __ stop("StubRoutines::forward exception: no pending exception (2)");
 366       __ bind(L);
 367     }
 368 #endif
 369     // jump to exception handler
 370     __ jmp(handler_reg, 0);
 371     // clear pending exception
 372     __ delayed()->st_ptr(G0, exception_addr);
 373 
 374     return start;
 375   }
 376 
 377   // Safefetch stubs.
 378   void generate_safefetch(const char* name, int size, address* entry,
 379                           address* fault_pc, address* continuation_pc) {
 380     // safefetch signatures:
 381     //   int      SafeFetch32(int*      adr, int      errValue);
 382     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
 383     //
 384     // arguments:
 385     //   o0 = adr
 386     //   o1 = errValue
 387     //
 388     // result:
 389     //   o0  = *adr or errValue
 390 
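    // Illustrative use (not generated here): callers probe possibly unmapped
    // memory without risking a crash, e.g.
    //   int v = SafeFetch32(addr, -1);  // v == errValue (-1) if the load faults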
 391     StubCodeMark mark(this, "StubRoutines", name);
 392 
 393     // Entry point, pc or function descriptor.
 394     __ align(CodeEntryAlignment);
 395     *entry = __ pc();
 396 
 397     __ mov(O0, G1);  // g1 = o0
 398     __ mov(O1, O0);  // o0 = o1
    // Load *adr into O0, may fault.
 400     *fault_pc = __ pc();
 401     switch (size) {
 402       case 4:
 403         // int32_t
 404         __ ldsw(G1, 0, O0);  // o0 = [g1]
 405         break;
 406       case 8:
 407         // int64_t
 408         __ ldx(G1, 0, O0);   // o0 = [g1]
 409         break;
 410       default:
 411         ShouldNotReachHere();
 412     }
 413 
 414     // return errValue or *adr
 415     *continuation_pc = __ pc();
 416     // By convention with the trap handler we ensure there is a non-CTI
 417     // instruction in the trap shadow.
 418     __ nop();
 419     __ retl();
 420     __ delayed()->nop();
 421   }
 422 
 423   //------------------------------------------------------------------------------------------------------------------------
 424   // Continuation point for throwing of implicit exceptions that are not handled in
 425   // the current activation. Fabricates an exception oop and initiates normal
 426   // exception dispatching in this frame. Only callee-saved registers are preserved
 427   // (through the normal register window / RegisterMap handling).
 428   // If the compiler needs all registers to be preserved between the fault
 429   // point and the exception handler then it must assume responsibility for that in
 430   // AbstractCompiler::continuation_for_implicit_null_exception or
 431   // continuation_for_implicit_division_by_zero_exception. All other implicit
 432   // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
 433   // either at call sites or otherwise assume that stack unwinding will be initiated,
 434   // so caller saved registers were assumed volatile in the compiler.
 435 
 436   // Note that we generate only this stub into a RuntimeStub, because it needs to be
 437   // properly traversed and ignored during GC, so we change the meaning of the "__"
 438   // macro within this method.
 439 #undef __
 440 #define __ masm->
 441 
 442   address generate_throw_exception(const char* name, address runtime_entry,
 443                                    Register arg1 = noreg, Register arg2 = noreg) {
 444 #ifdef ASSERT
 445     int insts_size = VerifyThread ? 1 * K : 600;
 446 #else
 447     int insts_size = VerifyThread ? 1 * K : 256;
 448 #endif /* ASSERT */
 449     int locs_size  = 32;
 450 
 451     CodeBuffer      code(name, insts_size, locs_size);
 452     MacroAssembler* masm = new MacroAssembler(&code);
 453 
 454     __ verify_thread();
 455 
 456     // This is an inlined and slightly modified version of call_VM
 457     // which has the ability to fetch the return PC out of thread-local storage
 458     __ assert_not_delayed();
 459 
 460     // Note that we always push a frame because on the SPARC
 461     // architecture, for all of our implicit exception kinds at call
 462     // sites, the implicit exception is taken before the callee frame
 463     // is pushed.
 464     __ save_frame(0);
 465 
 466     int frame_complete = __ offset();
 467 
 468     // Note that we always have a runtime stub frame on the top of stack by this point
 469     Register last_java_sp = SP;
 470     // 64-bit last_java_sp is biased!
 471     __ set_last_Java_frame(last_java_sp, G0);
 472     if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
 473     __ save_thread(noreg);
 474     if (arg1 != noreg) {
 475       assert(arg2 != O1, "clobbered");
 476       __ mov(arg1, O1);
 477     }
 478     if (arg2 != noreg) {
 479       __ mov(arg2, O2);
 480     }
 481     // do the call
 482     BLOCK_COMMENT("call runtime_entry");
 483     __ call(runtime_entry, relocInfo::runtime_call_type);
 484     if (!VerifyThread)
 485       __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
 486     else
 487       __ delayed()->nop();             // (thread already passed)
 488     __ restore_thread(noreg);
 489     __ reset_last_Java_frame();
 490 
 491     // check for pending exceptions. use Gtemp as scratch register.
 492 #ifdef ASSERT
 493     Label L;
 494 
 495     Address exception_addr(G2_thread, Thread::pending_exception_offset());
 496     Register scratch_reg = Gtemp;
 497     __ ld_ptr(exception_addr, scratch_reg);
 498     __ br_notnull_short(scratch_reg, Assembler::pt, L);
 499     __ should_not_reach_here();
 500     __ bind(L);
 501 #endif // ASSERT
 502     BLOCK_COMMENT("call forward_exception_entry");
 503     __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
 504     // we use O7 linkage so that forward_exception_entry has the issuing PC
 505     __ delayed()->restore();
 506 
 507     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
 508     return stub->entry_point();
 509   }
 510 
 511 #undef __
 512 #define __ _masm->
 513 
 514 
 515   // Generate a routine that sets all the registers so we
 516   // can tell if the stop routine prints them correctly.
 517   address generate_test_stop() {
 518     StubCodeMark mark(this, "StubRoutines", "test_stop");
 519     address start = __ pc();
 520 
 521     int i;
 522 
 523     __ save_frame(0);
 524 
 525     static jfloat zero = 0.0, one = 1.0;
 526 
 527     // put addr in L0, then load through L0 to F0
 528     __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
 529     __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1
 530 
 531     // use add to put 2..18 in F2..F18
 532     for ( i = 2;  i <= 18;  ++i ) {
 533       __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1),  as_FloatRegister(i));
 534     }
 535 
 536     // Now put double 2 in F16, double 18 in F18
 537     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
 538     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );
 539 
 540     // use add to put 20..32 in F20..F32
 541     for (i = 20; i < 32; i += 2) {
 542       __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2),  as_FloatRegister(i));
 543     }
 544 
 545     // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
 546     for ( i = 0; i < 8; ++i ) {
 547       if (i < 6) {
 548         __ set(     i, as_iRegister(i));
 549         __ set(16 + i, as_oRegister(i));
 550         __ set(24 + i, as_gRegister(i));
 551       }
 552       __ set( 8 + i, as_lRegister(i));
 553     }
 554 
 555     __ stop("testing stop");
 556 
 557 
 558     __ ret();
 559     __ delayed()->restore();
 560 
 561     return start;
 562   }
 563 
 564 
 565   address generate_stop_subroutine() {
 566     StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
 567     address start = __ pc();
 568 
 569     __ stop_subroutine();
 570 
 571     return start;
 572   }
 573 
 574   address generate_flush_callers_register_windows() {
 575     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
 576     address start = __ pc();
 577 
 578     __ flushw();
 579     __ retl(false);
 580     __ delayed()->add( FP, STACK_BIAS, O0 );
 581     // The returned value must be a stack pointer whose register save area
 582     // is flushed, and will stay flushed while the caller executes.
 583 
 584     return start;
 585   }
 586 
 587   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
 588   //
 589   // Arguments:
 590   //
 591   //      exchange_value: O0
 592   //      dest:           O1
 593   //
 594   // Results:
 595   //
 596   //     O0: the value previously stored in dest
 597   //
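  // A minimal C-style sketch of the CAS path generated below (illustrative
  // only; cas32 stands in for the SPARC cas instruction and is not a real
  // helper):
  //
  //   jint atomic_xchg(jint exchange_value, volatile jint* dest) {
  //     for (;;) {
  //       jint observed = *dest;
  //       if (cas32(dest, observed, exchange_value) == observed)
  //         return observed;               // the previously stored value
  //     }
  //   }
  //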
 598   address generate_atomic_xchg() {
 599     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 600     address start = __ pc();
 601 
 602     if (UseCASForSwap) {
 603       // Use CAS instead of swap, just in case the MP hardware
 604       // prefers to work with just one kind of synch. instruction.
 605       Label retry;
 606       __ BIND(retry);
 607       __ mov(O0, O3);       // scratch copy of exchange value
 608       __ ld(O1, 0, O2);     // observe the previous value
 609       // try to replace O2 with O3
 610       __ cas(O1, O2, O3);
 611       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 612 
 613       __ retl(false);
 614       __ delayed()->mov(O2, O0);  // report previous value to caller
 615     } else {
 616       __ retl(false);
 617       __ delayed()->swap(O1, 0, O0);
 618     }
 619 
 620     return start;
 621   }
 622 
 623 
 624   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
 625   //
 626   // Arguments:
 627   //
 628   //      exchange_value: O0
 629   //      dest:           O1
 630   //      compare_value:  O2
 631   //
 632   // Results:
 633   //
 634   //     O0: the value previously stored in dest
 635   //
 636   address generate_atomic_cmpxchg() {
 637     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 638     address start = __ pc();
 639 
 640     // cmpxchg(dest, compare_value, exchange_value)
 641     __ cas(O1, O2, O0);
 642     __ retl(false);
 643     __ delayed()->nop();
 644 
 645     return start;
 646   }
 647 
 648   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
 649   //
 650   // Arguments:
 651   //
 652   //      exchange_value: O1:O0
 653   //      dest:           O2
 654   //      compare_value:  O4:O3
 655   //
 656   // Results:
 657   //
 658   //     O1:O0: the value previously stored in dest
 659   //
 660   // Overwrites: G1,G2,G3
 661   //
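  // The 32-bit register halves are first packed into single 64-bit values for
  // the casx, roughly (illustrative only):
  //
  //   O0 = ((jlong)O0 << 32) | (juint)O1;   // exchange_value
  //   O3 = ((jlong)O3 << 32) | (juint)O4;   // compare_value
  //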
 662   address generate_atomic_cmpxchg_long() {
 663     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 664     address start = __ pc();
 665 
 666     __ sllx(O0, 32, O0);
 667     __ srl(O1, 0, O1);
    __ or3(O0,O1,O0);      // O0 holds 64-bit value from exchange_value
 669     __ sllx(O3, 32, O3);
 670     __ srl(O4, 0, O4);
    __ or3(O3,O4,O3);     // O3 holds 64-bit value from compare_value
 672     __ casx(O2, O3, O0);
 673     __ srl(O0, 0, O1);    // unpacked return value in O1:O0
 674     __ retl(false);
 675     __ delayed()->srlx(O0, 32, O0);
 676 
 677     return start;
 678   }
 679 
 680 
 681   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
 682   //
 683   // Arguments:
 684   //
 685   //      add_value: O0   (e.g., +1 or -1)
 686   //      dest:      O1
 687   //
 688   // Results:
 689   //
 690   //     O0: the new value stored in dest
 691   //
 692   // Overwrites: O3
 693   //
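  // Like atomic_xchg above, this is generated as a cas retry loop.  Note that
  // the value reported back in O0 is the new value (old contents + add_value),
  // not the previous contents of dest.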
 694   address generate_atomic_add() {
 695     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 696     address start = __ pc();
 697     __ BIND(_atomic_add_stub);
 698 
    Label retry;
 700     __ BIND(retry);
 701 
 702     __ lduw(O1, 0, O2);
 703     __ add(O0, O2, O3);
 704     __ cas(O1, O2, O3);
 705     __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 706     __ retl(false);
 707     __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 708 
 709     return start;
 710   }
 711   Label _atomic_add_stub;  // called from other stubs
 712 
 713 
 714   // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
 715   // Arguments :
 716   //
 717   //      ret  : O0, returned
 718   //      icc/xcc: set as O0 (depending on wordSize)
 719   //      sub  : O1, argument, not changed
 720   //      super: O2, argument, not changed
 721   //      raddr: O7, blown by call
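  //
  //  On return, O0 == 0 (condition codes: zero) means 'sub' is a subtype of
  //  'super'; O0 != 0 (condition codes: not zero) means it is not.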
 722   address generate_partial_subtype_check() {
 723     __ align(CodeEntryAlignment);
 724     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
 725     address start = __ pc();
 726     Label miss;
 727 
 728     __ save_frame(0);
 729     Register Rret   = I0;
 730     Register Rsub   = I1;
 731     Register Rsuper = I2;
 732 
 733     Register L0_ary_len = L0;
 734     Register L1_ary_ptr = L1;
 735     Register L2_super   = L2;
 736     Register L3_index   = L3;
 737 
 738     __ check_klass_subtype_slow_path(Rsub, Rsuper,
 739                                      L0, L1, L2, L3,
 740                                      NULL, &miss);
 741 
 742     // Match falls through here.
 743     __ addcc(G0,0,Rret);        // set Z flags, Z result
 744 
 745     __ ret();                   // Result in Rret is zero; flags set to Z
 746     __ delayed()->restore();
 747 
 748     __ BIND(miss);
 749     __ addcc(G0,1,Rret);        // set NZ flags, NZ result
 750 
 751     __ ret();                   // Result in Rret is != 0; flags set to NZ
 752     __ delayed()->restore();
 753 
 754     return start;
 755   }
 756 
 757 
 758   // Called from MacroAssembler::verify_oop
 759   //
 760   address generate_verify_oop_subroutine() {
 761     StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
 762 
 763     address start = __ pc();
 764 
 765     __ verify_oop_subroutine();
 766 
 767     return start;
 768   }
 769 
 770 
 771   //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
 774   //
 775   //  Input:
 776   //    Rint  -  32-bits value
 777   //    Rtmp  -  scratch
 778   //
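  //  For example, 0x0000000012345678 passes the check, while
  //  0xFFFFFFFF12345678 triggers the breakpoint trap (illustrative values).
  //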
 779   void assert_clean_int(Register Rint, Register Rtmp) {
 780   #if defined(ASSERT)
 781     __ signx(Rint, Rtmp);
 782     __ cmp(Rint, Rtmp);
 783     __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
 784   #endif
 785   }
 786 
 787   //
 788   //  Generate overlap test for array copy stubs
 789   //
 790   //  Input:
 791   //    O0    -  array1
 792   //    O1    -  array2
 793   //    O2    -  element count
 794   //
 795   //  Kills temps:  O3, O4
 796   //
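  //  In effect (unsigned arithmetic, illustrative only) it branches to the
  //  no-overlap target when a simple forward copy is safe:
  //
  //    if (to <= from || (to - from) >= (count << log2_elem_size))
  //      goto no_overlap_target;
  //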
 797   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
 798     assert(no_overlap_target != NULL, "must be generated");
 799     array_overlap_test(no_overlap_target, NULL, log2_elem_size);
 800   }
 801   void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
 802     array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
 803   }
 804   void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
 805     const Register from       = O0;
 806     const Register to         = O1;
 807     const Register count      = O2;
 808     const Register to_from    = O3; // to - from
 809     const Register byte_count = O4; // count << log2_elem_size
 810 
 811       __ subcc(to, from, to_from);
 812       __ sll_ptr(count, log2_elem_size, byte_count);
 813       if (NOLp == NULL)
 814         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
 815       else
 816         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
 817       __ delayed()->cmp(to_from, byte_count);
 818       if (NOLp == NULL)
 819         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
 820       else
 821         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
 822       __ delayed()->nop();
 823   }
 824 
 825   //
 826   //  Generate pre-write barrier for array.
 827   //
 828   //  Input:
 829   //     addr     - register containing starting address
 830   //     count    - register containing element count
 831   //     tmp      - scratch register
 832   //
 833   //  The input registers are overwritten.
 834   //
 835   void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
 836     BarrierSet* bs = Universe::heap()->barrier_set();
 837     switch (bs->kind()) {
 838       case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
 840         if (!dest_uninitialized) {
 841           __ save_frame(0);
 842           // Save the necessary global regs... will be used after.
 843           if (addr->is_global()) {
 844             __ mov(addr, L0);
 845           }
 846           if (count->is_global()) {
 847             __ mov(count, L1);
 848           }
 849           __ mov(addr->after_save(), O0);
 850           // Get the count into O1
 851           __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
 852           __ delayed()->mov(count->after_save(), O1);
 853           if (addr->is_global()) {
 854             __ mov(L0, addr);
 855           }
 856           if (count->is_global()) {
 857             __ mov(L1, count);
 858           }
 859           __ restore();
 860         }
 861         break;
 862       case BarrierSet::CardTableForRS:
 863       case BarrierSet::CardTableExtension:
 864       case BarrierSet::ModRef:
 865         break;
 866       default:
 867         ShouldNotReachHere();
 868     }
 869   }
 870   //
 871   //  Generate post-write barrier for array.
 872   //
 873   //  Input:
 874   //     addr     - register containing starting address
 875   //     count    - register containing element count
 876   //     tmp      - scratch register
 877   //
 878   //  The input registers are overwritten.
 879   //
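  //  For the card-table cases this is roughly equivalent to (illustrative
  //  sketch only):
  //
  //    jbyte* first = byte_map_base + ((uintptr_t) addr                          >> card_shift);
  //    jbyte* last  = byte_map_base + ((uintptr_t)(addr + (count-1)*heapOopSize) >> card_shift);
  //    for (jbyte* p = first; p <= last; p++)  *p = 0;    // 0 == dirty card
  //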
 880   void gen_write_ref_array_post_barrier(Register addr, Register count,
 881                                         Register tmp) {
 882     BarrierSet* bs = Universe::heap()->barrier_set();
 883 
 884     switch (bs->kind()) {
 885       case BarrierSet::G1SATBCTLogging:
 886         {
 887           // Get some new fresh output registers.
 888           __ save_frame(0);
 889           __ mov(addr->after_save(), O0);
 890           __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
 891           __ delayed()->mov(count->after_save(), O1);
 892           __ restore();
 893         }
 894         break;
 895       case BarrierSet::CardTableForRS:
 896       case BarrierSet::CardTableExtension:
 897         {
 898           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
 899           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 900           assert_different_registers(addr, count, tmp);
 901 
 902           Label L_loop;
 903 
 904           __ sll_ptr(count, LogBytesPerHeapOop, count);
 905           __ sub(count, BytesPerHeapOop, count);
 906           __ add(count, addr, count);
 907           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
 908           __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
 909           __ srl_ptr(count, CardTableModRefBS::card_shift, count);
 910           __ sub(count, addr, count);
 911           AddressLiteral rs(ct->byte_map_base);
 912           __ set(rs, tmp);
 913         __ BIND(L_loop);
 914           __ stb(G0, tmp, addr);
 915           __ subcc(count, 1, count);
 916           __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
 917           __ delayed()->add(addr, 1, addr);
 918         }
 919         break;
 920       case BarrierSet::ModRef:
 921         break;
 922       default:
 923         ShouldNotReachHere();
 924     }
 925   }
 926 
 927   //
 928   // Generate main code for disjoint arraycopy
 929   //
 930   typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
 931                                               Label& L_loop, bool use_prefetch, bool use_bis);
 932 
 933   void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
 934                           int iter_size, StubGenerator::CopyLoopFunc copy_loop_func) {
 935     Label L_copy;
 936 
 937     assert(log2_elem_size <= 3, "the following code should be changed");
 938     int count_dec = 16>>log2_elem_size;
 939 
 940     int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
 941     assert(prefetch_dist < 4096, "invalid value");
 942     prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
 943     int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count
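    // For example, with a prefetch distance of 100 bytes and iter_size == 16,
    // prefetch_dist rounds up to 112 and prefetch_count becomes
    // 112 >> log2_elem_size elements (illustrative numbers only).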
 944 
 945     if (UseBlockCopy) {
 946       Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;
 947 
 948       // 64 bytes tail + bytes copied in one loop iteration
 949       int tail_size = 64 + iter_size;
 950       int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
 951       // Use BIS copy only for big arrays since it requires membar.
 952       __ set(block_copy_count, O4);
 953       __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
 954       // This code is for disjoint source and destination:
 955       //   to <= from || to >= from+count
 956       // but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
 957       __ sub(from, to, O4);
      __ srax(O4, 4, O4); // divide by 16 since the following short branch has only 5 bits for the immediate.
 959       __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);
 960 
 961       __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
 962       // BIS should not be used to copy tail (64 bytes+iter_size)
 963       // to avoid zeroing of following values.
 964       __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0
 965 
 966       if (prefetch_count > 0) { // rounded up to one iteration count
 967         // Do prefetching only if copy size is bigger
 968         // than prefetch distance.
 969         __ set(prefetch_count, O4);
 970         __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
 971         __ sub(count, prefetch_count, count);
 972 
 973         (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
 974         __ add(count, prefetch_count, count); // restore count
 975 
 976       } // prefetch_count > 0
 977 
 978       (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
 979       __ add(count, (tail_size>>log2_elem_size), count); // restore count
 980 
 981       __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
 982       // BIS needs membar.
 983       __ membar(Assembler::StoreLoad);
 984       // Copy tail
 985       __ ba_short(L_copy);
 986 
 987       __ BIND(L_skip_block_copy);
 988     } // UseBlockCopy
 989 
 990     if (prefetch_count > 0) { // rounded up to one iteration count
 991       // Do prefetching only if copy size is bigger
 992       // than prefetch distance.
 993       __ set(prefetch_count, O4);
 994       __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
 995       __ sub(count, prefetch_count, count);
 996 
 997       Label L_copy_prefetch;
 998       (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
 999       __ add(count, prefetch_count, count); // restore count
1000 
1001     } // prefetch_count > 0
1002 
1003     (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
1004   }
1005 
1006 
1007 
1008   //
1009   // Helper methods for copy_16_bytes_forward_with_shift()
1010   //
1011   void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
1012                                 Label& L_loop, bool use_prefetch, bool use_bis) {
1013 
1014     const Register left_shift  = G1; // left  shift bit counter
1015     const Register right_shift = G5; // right shift bit counter
1016 
1017     __ align(OptoLoopAlignment);
1018     __ BIND(L_loop);
1019     if (use_prefetch) {
1020       if (ArraycopySrcPrefetchDistance > 0) {
1021         __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
1022       }
1023       if (ArraycopyDstPrefetchDistance > 0) {
1024         __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
1025       }
1026     }
1027     __ ldx(from, 0, O4);
1028     __ ldx(from, 8, G4);
1029     __ inc(to, 16);
1030     __ inc(from, 16);
1031     __ deccc(count, count_dec); // Can we do next iteration after this one?
1032     __ srlx(O4, right_shift, G3);
1033     __ bset(G3, O3);
1034     __ sllx(O4, left_shift,  O4);
1035     __ srlx(G4, right_shift, G3);
1036     __ bset(G3, O4);
1037     if (use_bis) {
1038       __ stxa(O3, to, -16);
1039       __ stxa(O4, to, -8);
1040     } else {
1041       __ stx(O3, to, -16);
1042       __ stx(O4, to, -8);
1043     }
1044     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1045     __ delayed()->sllx(G4, left_shift,  O3);
1046   }
1047 
1048   // Copy big chunks forward with shift
1049   //
1050   // Inputs:
  //   from      - source array address
1052   //   to        - destination array aligned to 8-bytes
1053   //   count     - elements count to copy >= the count equivalent to 16 bytes
1054   //   count_dec - elements count's decrement equivalent to 16 bytes
1055   //   L_copy_bytes - copy exit label
1056   //
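  // Conceptually, with m = from & 7 (misaligned source bytes) and the source
  // pointer aligned down, each aligned destination word is assembled from two
  // adjacent aligned source words (illustrative only):
  //
  //   left_shift  = m * 8;   right_shift = 64 - left_shift;
  //   dst[i] = (src[i] << left_shift) | (src[i+1] >> right_shift);
  //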
1057   void copy_16_bytes_forward_with_shift(Register from, Register to,
1058                      Register count, int log2_elem_size, Label& L_copy_bytes) {
1059     Label L_aligned_copy, L_copy_last_bytes;
1060     assert(log2_elem_size <= 3, "the following code should be changed");
1061     int count_dec = 16>>log2_elem_size;
1062 
1063     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
1064     __ andcc(from, 7, G1); // misaligned bytes
1065     __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1066     __ delayed()->nop();
1067 
1068     const Register left_shift  = G1; // left  shift bit counter
1069     const Register right_shift = G5; // right shift bit counter
1070 
1071     __ sll(G1, LogBitsPerByte, left_shift);
1072     __ mov(64, right_shift);
1073     __ sub(right_shift, left_shift, right_shift);
1074 
1075     //
1076     // Load 2 aligned 8-bytes chunks and use one from previous iteration
1077     // to form 2 aligned 8-bytes chunks to store.
1078     //
1079     __ dec(count, count_dec);   // Pre-decrement 'count'
1080     __ andn(from, 7, from);     // Align address
1081     __ ldx(from, 0, O3);
1082     __ inc(from, 8);
1083     __ sllx(O3, left_shift,  O3);
1084 
1085     disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop);
1086 
1087     __ inccc(count, count_dec>>1 ); // + 8 bytes
1088     __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1089     __ delayed()->inc(count, count_dec>>1); // restore 'count'
1090 
1091     // copy 8 bytes, part of them already loaded in O3
1092     __ ldx(from, 0, O4);
1093     __ inc(to, 8);
1094     __ inc(from, 8);
1095     __ srlx(O4, right_shift, G3);
1096     __ bset(O3, G3);
1097     __ stx(G3, to, -8);
1098 
1099     __ BIND(L_copy_last_bytes);
1100     __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
1101     __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1102     __ delayed()->sub(from, right_shift, from);       // restore address
1103 
1104     __ BIND(L_aligned_copy);
1105   }
1106 
1107   // Copy big chunks backward with shift
1108   //
1109   // Inputs:
  //   end_from  - source array end address
1111   //   end_to    - destination array end address aligned to 8-bytes
1112   //   count     - elements count to copy >= the count equivalent to 16 bytes
1113   //   count_dec - elements count's decrement equivalent to 16 bytes
1114   //   L_aligned_copy - aligned copy exit label
1115   //   L_copy_bytes   - copy exit label
1116   //
1117   void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
1118                      Register count, int count_dec,
1119                      Label& L_aligned_copy, Label& L_copy_bytes) {
1120     Label L_loop, L_copy_last_bytes;
1121 
1122     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
1123       __ andcc(end_from, 7, G1); // misaligned bytes
1124       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1125       __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'
1126 
1127     const Register left_shift  = G1; // left  shift bit counter
1128     const Register right_shift = G5; // right shift bit counter
1129 
1130       __ sll(G1, LogBitsPerByte, left_shift);
1131       __ mov(64, right_shift);
1132       __ sub(right_shift, left_shift, right_shift);
1133 
1134     //
1135     // Load 2 aligned 8-bytes chunks and use one from previous iteration
1136     // to form 2 aligned 8-bytes chunks to store.
1137     //
1138       __ andn(end_from, 7, end_from);     // Align address
1139       __ ldx(end_from, 0, O3);
1140       __ align(OptoLoopAlignment);
1141     __ BIND(L_loop);
1142       __ ldx(end_from, -8, O4);
1143       __ deccc(count, count_dec); // Can we do next iteration after this one?
1144       __ ldx(end_from, -16, G4);
1145       __ dec(end_to, 16);
1146       __ dec(end_from, 16);
1147       __ srlx(O3, right_shift, O3);
1148       __ sllx(O4, left_shift,  G3);
1149       __ bset(G3, O3);
1150       __ stx(O3, end_to, 8);
1151       __ srlx(O4, right_shift, O4);
1152       __ sllx(G4, left_shift,  G3);
1153       __ bset(G3, O4);
1154       __ stx(O4, end_to, 0);
1155       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1156       __ delayed()->mov(G4, O3);
1157 
1158       __ inccc(count, count_dec>>1 ); // + 8 bytes
1159       __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1160       __ delayed()->inc(count, count_dec>>1); // restore 'count'
1161 
1162       // copy 8 bytes, part of them already loaded in O3
1163       __ ldx(end_from, -8, O4);
1164       __ dec(end_to, 8);
1165       __ dec(end_from, 8);
1166       __ srlx(O3, right_shift, O3);
1167       __ sllx(O4, left_shift,  G3);
1168       __ bset(O3, G3);
1169       __ stx(G3, end_to, 0);
1170 
1171     __ BIND(L_copy_last_bytes);
1172       __ srl(left_shift, LogBitsPerByte, left_shift);    // misaligned bytes
1173       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1174       __ delayed()->add(end_from, left_shift, end_from); // restore address
1175   }
1176 
1177   //
1178   //  Generate stub for disjoint byte copy.  If "aligned" is true, the
1179   //  "from" and "to" addresses are assumed to be heapword aligned.
1180   //
1181   // Arguments for generated stub:
1182   //      from:  O0
1183   //      to:    O1
1184   //      count: O2 treated as signed
1185   //
1186   address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
1187     __ align(CodeEntryAlignment);
1188     StubCodeMark mark(this, "StubRoutines", name);
1189     address start = __ pc();
1190 
1191     Label L_skip_alignment, L_align;
1192     Label L_copy_byte, L_copy_byte_loop, L_exit;
1193 
1194     const Register from      = O0;   // source array address
1195     const Register to        = O1;   // destination array address
1196     const Register count     = O2;   // elements count
1197     const Register offset    = O5;   // offset from start of arrays
1198     // O3, O4, G3, G4 are used as temp registers
1199 
1200     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1201 
1202     if (entry != NULL) {
1203       *entry = __ pc();
1204       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1205       BLOCK_COMMENT("Entry:");
1206     }
1207 
1208     // for short arrays, just do single element copy
1209     __ cmp(count, 23); // 16 + 7
1210     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1211     __ delayed()->mov(G0, offset);
1212 
1213     if (aligned) {
1214       // 'aligned' == true when it is known statically during compilation
1215       // of this arraycopy call site that both 'from' and 'to' addresses
1216       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1217       //
      // Aligned arrays have 4-byte alignment in the 32-bit VM and 8-byte
      // alignment in the 64-bit VM, so the extra alignment code was only
      // needed for the 32-bit VM and is omitted here.
1220       //
1221     } else {
1222       // copy bytes to align 'to' on 8 byte boundary
1223       __ andcc(to, 7, G1); // misaligned bytes
1224       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1225       __ delayed()->neg(G1);
      __ inc(G1, 8);       // bytes needed to reach the next 8-byte alignment
1227       __ sub(count, G1, count);
1228     __ BIND(L_align);
1229       __ ldub(from, 0, O3);
1230       __ deccc(G1);
1231       __ inc(from);
1232       __ stb(O3, to, 0);
1233       __ br(Assembler::notZero, false, Assembler::pt, L_align);
1234       __ delayed()->inc(to);
1235     __ BIND(L_skip_alignment);
1236     }
1237     if (!aligned)
1238     {
1239       // Copy with shift 16 bytes per iteration if arrays do not have
1240       // the same alignment mod 8, otherwise fall through to the next
1241       // code for aligned copy.
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1243       // Also jump over aligned copy after the copy with shift completed.
1244 
1245       copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
1246     }
1247 
    // Both arrays are 8-byte aligned, copy 16 bytes at a time
1249       __ and3(count, 7, G4); // Save count
1250       __ srl(count, 3, count);
1251      generate_disjoint_long_copy_core(aligned);
1252       __ mov(G4, count);     // Restore count
1253 
    // copy trailing bytes
1255     __ BIND(L_copy_byte);
1256       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1257       __ align(OptoLoopAlignment);
1258     __ BIND(L_copy_byte_loop);
1259       __ ldub(from, offset, O3);
1260       __ deccc(count);
1261       __ stb(O3, to, offset);
1262       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
1263       __ delayed()->inc(offset);
1264 
1265     __ BIND(L_exit);
1266       // O3, O4 are used as temp registers
1267       inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1268       __ retl();
1269       __ delayed()->mov(G0, O0); // return 0
1270     return start;
1271   }
1272 
1273   //
1274   //  Generate stub for conjoint byte copy.  If "aligned" is true, the
1275   //  "from" and "to" addresses are assumed to be heapword aligned.
1276   //
1277   // Arguments for generated stub:
1278   //      from:  O0
1279   //      to:    O1
1280   //      count: O2 treated as signed
1281   //
1282   address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
1283                                       address *entry, const char *name) {
1284     // Do reverse copy.
1285 
1286     __ align(CodeEntryAlignment);
1287     StubCodeMark mark(this, "StubRoutines", name);
1288     address start = __ pc();
1289 
1290     Label L_skip_alignment, L_align, L_aligned_copy;
1291     Label L_copy_byte, L_copy_byte_loop, L_exit;
1292 
1293     const Register from      = O0;   // source array address
1294     const Register to        = O1;   // destination array address
1295     const Register count     = O2;   // elements count
1296     const Register end_from  = from; // source array end address
1297     const Register end_to    = to;   // destination array end address
1298 
1299     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1300 
1301     if (entry != NULL) {
1302       *entry = __ pc();
1303       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1304       BLOCK_COMMENT("Entry:");
1305     }
1306 
1307     array_overlap_test(nooverlap_target, 0);
1308 
1309     __ add(to, count, end_to);       // offset after last copied element
1310 
1311     // for short arrays, just do single element copy
1312     __ cmp(count, 23); // 16 + 7
1313     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1314     __ delayed()->add(from, count, end_from);
1315 
1316     {
      // Align the ends of the arrays since they may not be aligned even
      // when the arrays themselves are aligned.
1319 
1320       // copy bytes to align 'end_to' on 8 byte boundary
1321       __ andcc(end_to, 7, G1); // misaligned bytes
1322       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1323       __ delayed()->nop();
1324       __ sub(count, G1, count);
1325     __ BIND(L_align);
1326       __ dec(end_from);
1327       __ dec(end_to);
1328       __ ldub(end_from, 0, O3);
1329       __ deccc(G1);
1330       __ brx(Assembler::notZero, false, Assembler::pt, L_align);
1331       __ delayed()->stb(O3, end_to, 0);
1332     __ BIND(L_skip_alignment);
1333     }
1334     if (aligned) {
1335       // Both arrays are aligned to 8-bytes in 64-bits VM.
1336       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1337       // in unaligned case.
1338       __ dec(count, 16);
1339     } else
1340     {
1341       // Copy with shift 16 bytes per iteration if arrays do not have
1342       // the same alignment mod 8, otherwise jump to the next
      // code for aligned copy (subtracting 16 from 'count' before the jump).
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1345       // Also jump over aligned copy after the copy with shift completed.
1346 
1347       copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
1348                                         L_aligned_copy, L_copy_byte);
1349     }
    // copy 16 elements (16 bytes) at a time
1351       __ align(OptoLoopAlignment);
1352     __ BIND(L_aligned_copy);
1353       __ dec(end_from, 16);
1354       __ ldx(end_from, 8, O3);
1355       __ ldx(end_from, 0, O4);
1356       __ dec(end_to, 16);
1357       __ deccc(count, 16);
1358       __ stx(O3, end_to, 8);
1359       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1360       __ delayed()->stx(O4, end_to, 0);
1361       __ inc(count, 16);
1362 
    // copy 1 element (1 byte) at a time
1364     __ BIND(L_copy_byte);
1365       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1366       __ align(OptoLoopAlignment);
1367     __ BIND(L_copy_byte_loop);
1368       __ dec(end_from);
1369       __ dec(end_to);
1370       __ ldub(end_from, 0, O4);
1371       __ deccc(count);
1372       __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
1373       __ delayed()->stb(O4, end_to, 0);
1374 
1375     __ BIND(L_exit);
1376     // O3, O4 are used as temp registers
1377     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1378     __ retl();
1379     __ delayed()->mov(G0, O0); // return 0
1380     return start;
1381   }
1382 
1383   //
1384   //  Generate stub for disjoint short copy.  If "aligned" is true, the
1385   //  "from" and "to" addresses are assumed to be heapword aligned.
1386   //
1387   // Arguments for generated stub:
1388   //      from:  O0
1389   //      to:    O1
1390   //      count: O2 treated as signed
1391   //
1392   address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
1393     __ align(CodeEntryAlignment);
1394     StubCodeMark mark(this, "StubRoutines", name);
1395     address start = __ pc();
1396 
1397     Label L_skip_alignment, L_skip_alignment2;
1398     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1399 
1400     const Register from      = O0;   // source array address
1401     const Register to        = O1;   // destination array address
1402     const Register count     = O2;   // elements count
1403     const Register offset    = O5;   // offset from start of arrays
1404     // O3, O4, G3, G4 are used as temp registers
1405 
1406     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1407 
1408     if (entry != NULL) {
1409       *entry = __ pc();
1410       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1411       BLOCK_COMMENT("Entry:");
1412     }
1413 
1414     // for short arrays, just do single element copy
1415     __ cmp(count, 11); // 8 + 3  (22 bytes)
1416     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1417     __ delayed()->mov(G0, offset);
1418 
1419     if (aligned) {
1420       // 'aligned' == true when it is known statically during compilation
1421       // of this arraycopy call site that both 'from' and 'to' addresses
1422       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1423       //
      // Aligned arrays have 4-byte alignment in the 32-bit VM
      // and 8-byte alignment in the 64-bit VM.
1426       //
1427     } else {
      // copy 1 element if necessary to align 'to' on a 4-byte boundary
1429       __ andcc(to, 3, G0);
1430       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1431       __ delayed()->lduh(from, 0, O3);
1432       __ inc(from, 2);
1433       __ inc(to, 2);
1434       __ dec(count);
1435       __ sth(O3, to, -2);
1436     __ BIND(L_skip_alignment);
1437 
1438       // copy 2 elements to align 'to' on an 8 byte boundary
1439       __ andcc(to, 7, G0);
1440       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1441       __ delayed()->lduh(from, 0, O3);
1442       __ dec(count, 2);
1443       __ lduh(from, 2, O4);
1444       __ inc(from, 4);
1445       __ inc(to, 4);
1446       __ sth(O3, to, -4);
1447       __ sth(O4, to, -2);
1448     __ BIND(L_skip_alignment2);
1449     }
1450     if (!aligned) {
1452       // Copy with shift 16 bytes per iteration if arrays do not have
1453       // the same alignment mod 8, otherwise fall through to the next
1454       // code for aligned copy.
1455       // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1456       // Also jump over aligned copy after the copy with shift completed.
1457 
1458       copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
1459     }
1460 
1461     // Both arrays are 8-byte aligned, copy 16 bytes at a time
1462       __ and3(count, 3, G4); // Save
1463       __ srl(count, 2, count);
1464       generate_disjoint_long_copy_core(aligned);
1465       __ mov(G4, count); // restore
1466 
1467     // copy 1 element at a time
1468     __ BIND(L_copy_2_bytes);
1469       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1470       __ align(OptoLoopAlignment);
1471     __ BIND(L_copy_2_bytes_loop);
1472       __ lduh(from, offset, O3);
1473       __ deccc(count);
1474       __ sth(O3, to, offset);
1475       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
1476       __ delayed()->inc(offset, 2);
1477 
1478     __ BIND(L_exit);
1479       // O3, O4 are used as temp registers
1480       inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1481       __ retl();
1482       __ delayed()->mov(G0, O0); // return 0
1483     return start;
1484   }
1485 
1486   //
1487   //  Generate stub for array fill (byte, short, or int).  If "aligned" is true, the
1488   //  "to" address is assumed to be heapword aligned.
1489   //
1490   // Arguments for generated stub:
1491   //      to:    O0
1492   //      value: O1
1493   //      count: O2 treated as signed
1494   //
1495   address generate_fill(BasicType t, bool aligned, const char* name) {
1496     __ align(CodeEntryAlignment);
1497     StubCodeMark mark(this, "StubRoutines", name);
1498     address start = __ pc();
1499 
1500     const Register to        = O0;   // destination array address
1501     const Register value     = O1;   // fill value
1502     const Register count     = O2;   // elements count
1503     // O3 is used as a temp register
1504 
1505     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1506 
1507     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
1508     Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
1509 
1510     int shift = -1;
1511     switch (t) {
1512       case T_BYTE:
1513         shift = 2;
1514         break;
1515       case T_SHORT:
1516         shift = 1;
1517         break;
1518       case T_INT:
1519         shift = 0;
1520         break;
1521       default: ShouldNotReachHere();
1522     }
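         // Note: 'shift' is chosen so that (1 << shift) elements span 4 bytes
         // (T_BYTE: 4 elements, T_SHORT: 2, T_INT: 1).  Thus '2 << shift'
         // elements are 8 bytes and '8 << shift' elements are 32 bytes, which
         // is how the size thresholds below are expressed in element counts.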
1523 
1524     BLOCK_COMMENT("Entry:");
1525 
1526     if (t == T_BYTE) {
1527       // Zero extend value
1528       __ and3(value, 0xff, value);
1529       __ sllx(value, 8, O3);
1530       __ or3(value, O3, value);
1531     }
1532     if (t == T_SHORT) {
1533       // Zero extend value
1534       __ sllx(value, 48, value);
1535       __ srlx(value, 48, value);
1536     }
1537     if (t == T_BYTE || t == T_SHORT) {
1538       __ sllx(value, 16, O3);
1539       __ or3(value, O3, value);
1540     }
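         // For T_BYTE and T_SHORT the fill pattern is now replicated across the
         // low 32 bits of 'value' (e.g., a byte value of 0xAB has become
         // 0xABABABAB).  The sllx/or3 pair further down widens it to all 64 bits
         // (0xABABABABABABABAB) for the 8-byte store loops.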
1541 
1542     __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
1543     __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
1544     __ delayed()->andcc(count, 1, G0);
1545 
1546     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
1547       // align the destination address to a 4-byte boundary
1548       if (t == T_BYTE) {
1549         // One byte misalignment happens only for byte arrays
1550         __ andcc(to, 1, G0);
1551         __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
1552         __ delayed()->nop();
1553         __ stb(value, to, 0);
1554         __ inc(to, 1);
1555         __ dec(count, 1);
1556         __ BIND(L_skip_align1);
1557       }
1558       // Two bytes misalignment happens only for byte and short (char) arrays
1559       __ andcc(to, 2, G0);
1560       __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
1561       __ delayed()->nop();
1562       __ sth(value, to, 0);
1563       __ inc(to, 2);
1564       __ dec(count, 1 << (shift - 1));
1565       __ BIND(L_skip_align2);
1566     }
1567     if (!aligned) {
1568       // align to 8 bytes; we know we are 4-byte aligned to start
1569       __ andcc(to, 7, G0);
1570       __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
1571       __ delayed()->nop();
1572       __ stw(value, to, 0);
1573       __ inc(to, 4);
1574       __ dec(count, 1 << shift);
1575       __ BIND(L_fill_32_bytes);
1576     }
1577 
1578     if (t == T_INT) {
1579       // Zero extend value
1580       __ srl(value, 0, value);
1581     }
1582     if (t == T_BYTE || t == T_SHORT || t == T_INT) {
1583       __ sllx(value, 32, O3);
1584       __ or3(value, O3, value);
1585     }
1586 
1587     Label L_check_fill_8_bytes;
1588     // Fill 32-byte chunks
1589     __ subcc(count, 8 << shift, count);
1590     __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
1591     __ delayed()->nop();
1592 
1593     Label L_fill_32_bytes_loop, L_fill_4_bytes;
1594     __ align(16);
1595     __ BIND(L_fill_32_bytes_loop);
1596 
1597     __ stx(value, to, 0);
1598     __ stx(value, to, 8);
1599     __ stx(value, to, 16);
1600     __ stx(value, to, 24);
1601 
1602     __ subcc(count, 8 << shift, count);
1603     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
1604     __ delayed()->add(to, 32, to);
1605 
1606     __ BIND(L_check_fill_8_bytes);
1607     __ addcc(count, 8 << shift, count);
1608     __ brx(Assembler::zero, false, Assembler::pn, L_exit);
1609     __ delayed()->subcc(count, 1 << (shift + 1), count);
1610     __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
1611     __ delayed()->andcc(count, 1<<shift, G0);
1612 
1613     //
1614     // length is too short, just fill 8 bytes at a time
1615     //
1616     Label L_fill_8_bytes_loop;
1617     __ BIND(L_fill_8_bytes_loop);
1618     __ stx(value, to, 0);
1619     __ subcc(count, 1 << (shift + 1), count);
1620     __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
1621     __ delayed()->add(to, 8, to);
1622 
1623     // fill trailing 4 bytes
1624     __ andcc(count, 1<<shift, G0);  // in delay slot of branches
1625     if (t == T_INT) {
1626       __ BIND(L_fill_elements);
1627     }
1628     __ BIND(L_fill_4_bytes);
1629     __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
1630     if (t == T_BYTE || t == T_SHORT) {
1631       __ delayed()->andcc(count, 1<<(shift-1), G0);
1632     } else {
1633       __ delayed()->nop();
1634     }
1635     __ stw(value, to, 0);
1636     if (t == T_BYTE || t == T_SHORT) {
1637       __ inc(to, 4);
1638       // fill trailing 2 bytes
1639       __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
1640       __ BIND(L_fill_2_bytes);
1641       __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
1642       __ delayed()->andcc(count, 1, count);
1643       __ sth(value, to, 0);
1644       if (t == T_BYTE) {
1645         __ inc(to, 2);
1646         // fill trailing byte
1647         __ andcc(count, 1, count);  // in delay slot of branches
1648         __ BIND(L_fill_byte);
1649         __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1650         __ delayed()->nop();
1651         __ stb(value, to, 0);
1652       } else {
1653         __ BIND(L_fill_byte);
1654       }
1655     } else {
1656       __ BIND(L_fill_2_bytes);
1657     }
1658     __ BIND(L_exit);
1659     __ retl();
1660     __ delayed()->nop();
1661 
1662     // Handle fills of less than 8 bytes.  Int is handled elsewhere.
1663     if (t == T_BYTE) {
1664       __ BIND(L_fill_elements);
1665       Label L_fill_2, L_fill_4;
1666       // in delay slot __ andcc(count, 1, G0);
1667       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1668       __ delayed()->andcc(count, 2, G0);
1669       __ stb(value, to, 0);
1670       __ inc(to, 1);
1671       __ BIND(L_fill_2);
1672       __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
1673       __ delayed()->andcc(count, 4, G0);
1674       __ stb(value, to, 0);
1675       __ stb(value, to, 1);
1676       __ inc(to, 2);
1677       __ BIND(L_fill_4);
1678       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1679       __ delayed()->nop();
1680       __ stb(value, to, 0);
1681       __ stb(value, to, 1);
1682       __ stb(value, to, 2);
1683       __ retl();
1684       __ delayed()->stb(value, to, 3);
1685     }
1686 
1687     if (t == T_SHORT) {
1688       Label L_fill_2;
1689       __ BIND(L_fill_elements);
1690       // in delay slot __ andcc(count, 1, G0);
1691       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1692       __ delayed()->andcc(count, 2, G0);
1693       __ sth(value, to, 0);
1694       __ inc(to, 2);
1695       __ BIND(L_fill_2);
1696       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1697       __ delayed()->nop();
1698       __ sth(value, to, 0);
1699       __ retl();
1700       __ delayed()->sth(value, to, 2);
1701     }
1702     return start;
1703   }
1704 
1705   //
1706   //  Generate stub for conjoint short copy.  If "aligned" is true, the
1707   //  "from" and "to" addresses are assumed to be heapword aligned.
1708   //
1709   // Arguments for generated stub:
1710   //      from:  O0
1711   //      to:    O1
1712   //      count: O2 treated as signed
1713   //
1714   address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1715                                        address *entry, const char *name) {
1716     // Do reverse copy.
1717 
1718     __ align(CodeEntryAlignment);
1719     StubCodeMark mark(this, "StubRoutines", name);
1720     address start = __ pc();
1721 
1722     Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
1723     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1724 
1725     const Register from      = O0;   // source array address
1726     const Register to        = O1;   // destination array address
1727     const Register count     = O2;   // elements count
1728     const Register end_from  = from; // source array end address
1729     const Register end_to    = to;   // destination array end address
1730 
1731     const Register byte_count = O3;  // bytes count to copy
1732 
1733     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1734 
1735     if (entry != NULL) {
1736       *entry = __ pc();
1737       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1738       BLOCK_COMMENT("Entry:");
1739     }
1740 
1741     array_overlap_test(nooverlap_target, 1);
1742 
1743     __ sllx(count, LogBytesPerShort, byte_count);
1744     __ add(to, byte_count, end_to);  // offset after last copied element
1745 
1746     // for short arrays, just do single element copy
1747     __ cmp(count, 11); // 8 + 3  (22 bytes)
1748     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1749     __ delayed()->add(from, byte_count, end_from);
1750 
1751     {
1752       // Align the ends of the arrays since they may not be aligned even
1753       // when the arrays themselves are aligned.
1754 
1755       // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
1756       __ andcc(end_to, 3, G0);
1757       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1758       __ delayed()->lduh(end_from, -2, O3);
1759       __ dec(end_from, 2);
1760       __ dec(end_to, 2);
1761       __ dec(count);
1762       __ sth(O3, end_to, 0);
1763     __ BIND(L_skip_alignment);
1764 
1765       // copy 2 elements to align 'end_to' on an 8 byte boundary
1766       __ andcc(end_to, 7, G0);
1767       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1768       __ delayed()->lduh(end_from, -2, O3);
1769       __ dec(count, 2);
1770       __ lduh(end_from, -4, O4);
1771       __ dec(end_from, 4);
1772       __ dec(end_to, 4);
1773       __ sth(O3, end_to, 2);
1774       __ sth(O4, end_to, 0);
1775     __ BIND(L_skip_alignment2);
1776     }
1777     if (aligned) {
1778       // Both arrays are aligned to 8 bytes in the 64-bit VM.
1779       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1780       // in the unaligned case.
1781       __ dec(count, 8);
1782     } else {
1784       // Copy with shift 16 bytes per iteration if arrays do not have
1785       // the same alignment mod 8, otherwise jump to the next
1786       // code for aligned copy (subtracting 8 from 'count' before the jump).
1787       // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1788       // Also jump over aligned copy after the copy with shift completed.
1789 
1790       copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
1791                                         L_aligned_copy, L_copy_2_bytes);
1792     }
1793     // copy 8 elements (16 bytes) at a time
1794       __ align(OptoLoopAlignment);
1795     __ BIND(L_aligned_copy);
1796       __ dec(end_from, 16);
1797       __ ldx(end_from, 8, O3);
1798       __ ldx(end_from, 0, O4);
1799       __ dec(end_to, 16);
1800       __ deccc(count, 8);
1801       __ stx(O3, end_to, 8);
1802       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1803       __ delayed()->stx(O4, end_to, 0);
1804       __ inc(count, 8);
1805 
1806     // copy 1 element (2 bytes) at a time
1807     __ BIND(L_copy_2_bytes);
1808       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1809     __ BIND(L_copy_2_bytes_loop);
1810       __ dec(end_from, 2);
1811       __ dec(end_to, 2);
1812       __ lduh(end_from, 0, O4);
1813       __ deccc(count);
1814       __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
1815       __ delayed()->sth(O4, end_to, 0);
1816 
1817     __ BIND(L_exit);
1818     // O3, O4 are used as temp registers
1819     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1820     __ retl();
1821     __ delayed()->mov(G0, O0); // return 0
1822     return start;
1823   }
1824 
1825   //
1826   // Helper methods for generate_disjoint_int_copy_core()
1827   //
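       // copy_16_bytes_loop() below handles the case where 'to' is 8-byte
       // aligned but 'from' is only 4-byte aligned.  Roughly, in C terms
       // (an illustrative sketch only: 'from'/'to' are treated as int*,
       // SPARC is big-endian, the carried word lives in O3, and the real
       // loop biases 'count' and works in branch delay slots):
       //
       //   uint64_t carry = (uint64_t)from[0] << 32;       // set up by the caller
       //   do {
       //     uint64_t a = *(uint64_t*)(from + 1);          // ints [1],[2], aligned load
       //     uint64_t b = *(uint64_t*)(from + 3);          // ints [3],[4]
       //     *(uint64_t*)(to + 0) = carry | (a >> 32);     // stores ints [0],[1]
       //     *(uint64_t*)(to + 2) = (a << 32) | (b >> 32); // stores ints [2],[3]
       //     carry = b << 32;                              // int [4] carried over
       //     from += 4; to += 4;
       //   } while ((count -= 4) >= 0);
       //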
1828   void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
1829                           Label& L_loop, bool use_prefetch, bool use_bis) {
1830 
1831     __ align(OptoLoopAlignment);
1832     __ BIND(L_loop);
1833     if (use_prefetch) {
1834       if (ArraycopySrcPrefetchDistance > 0) {
1835         __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
1836       }
1837       if (ArraycopyDstPrefetchDistance > 0) {
1838         __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
1839       }
1840     }
1841     __ ldx(from, 4, O4);
1842     __ ldx(from, 12, G4);
1843     __ inc(to, 16);
1844     __ inc(from, 16);
1845     __ deccc(count, 4); // Can we do next iteration after this one?
1846 
1847     __ srlx(O4, 32, G3);
1848     __ bset(G3, O3);
1849     __ sllx(O4, 32, O4);
1850     __ srlx(G4, 32, G3);
1851     __ bset(G3, O4);
1852     if (use_bis) {
1853       __ stxa(O3, to, -16);
1854       __ stxa(O4, to, -8);
1855     } else {
1856       __ stx(O3, to, -16);
1857       __ stx(O4, to, -8);
1858     }
1859     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1860     __ delayed()->sllx(G4, 32,  O3);
1861 
1862   }
1863 
1864   //
1865   //  Generate core code for disjoint int copy (and oop copy on 32-bit).
1866   //  If "aligned" is true, the "from" and "to" addresses are assumed
1867   //  to be heapword aligned.
1868   //
1869   // Arguments:
1870   //      from:  O0
1871   //      to:    O1
1872   //      count: O2 treated as signed
1873   //
1874   void generate_disjoint_int_copy_core(bool aligned) {
1875 
1876     Label L_skip_alignment, L_aligned_copy;
1877     Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1878 
1879     const Register from      = O0;   // source array address
1880     const Register to        = O1;   // destination array address
1881     const Register count     = O2;   // elements count
1882     const Register offset    = O5;   // offset from start of arrays
1883     // O3, O4, G3, G4 are used as temp registers
1884 
1885     // 'aligned' == true when it is known statically during compilation
1886     // of this arraycopy call site that both 'from' and 'to' addresses
1887     // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1888     //
1889     // Aligned arrays have 4-byte alignment in the 32-bit VM
1890     // and 8-byte alignment in the 64-bit VM.
1891     //
1892     if (!aligned)
1893     {
1894       // The next check could be put under 'ifndef' since the code in
1895       // generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
1896 
1897       // for short arrays, just do single element copy
1898       __ cmp(count, 5); // 4 + 1 (20 bytes)
1899       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1900       __ delayed()->mov(G0, offset);
1901 
1902       // copy 1 element to align 'to' on an 8 byte boundary
1903       __ andcc(to, 7, G0);
1904       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1905       __ delayed()->ld(from, 0, O3);
1906       __ inc(from, 4);
1907       __ inc(to, 4);
1908       __ dec(count);
1909       __ st(O3, to, -4);
1910     __ BIND(L_skip_alignment);
1911 
1912     // if arrays have the same alignment mod 8, do a 4-element copy
1913       __ andcc(from, 7, G0);
1914       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1915       __ delayed()->ld(from, 0, O3);
1916 
1917     //
1918     // Load 2 aligned 8-byte chunks and use one from the previous iteration
1919     // to form 2 aligned 8-byte chunks to store.
1920     //
1921     // copy_16_bytes_forward_with_shift() is not used here since this
1922     // code is more optimal.
1923 
1924     // copy with shift 4 elements (16 bytes) at a time
1925       __ dec(count, 4);   // The cmp at the beginning guarantees count >= 4
1926       __ sllx(O3, 32,  O3);
1927 
1928       disjoint_copy_core(from, to, count, 2, 16, &StubGenerator::copy_16_bytes_loop);
1929 
1930       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
1931       __ delayed()->inc(count, 4); // restore 'count'
1932 
1933     __ BIND(L_aligned_copy);
1934     } // !aligned
1935 
1936     // copy 4 elements (16 bytes) at a time
1937       __ and3(count, 1, G4); // Save
1938       __ srl(count, 1, count);
1939       generate_disjoint_long_copy_core(aligned);
1940       __ mov(G4, count);     // Restore
1941 
1942     // copy 1 element at a time
1943     __ BIND(L_copy_4_bytes);
1944       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1945     __ BIND(L_copy_4_bytes_loop);
1946       __ ld(from, offset, O3);
1947       __ deccc(count);
1948       __ st(O3, to, offset);
1949       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
1950       __ delayed()->inc(offset, 4);
1951     __ BIND(L_exit);
1952   }
1953 
1954   //
1955   //  Generate stub for disjoint int copy.  If "aligned" is true, the
1956   //  "from" and "to" addresses are assumed to be heapword aligned.
1957   //
1958   // Arguments for generated stub:
1959   //      from:  O0
1960   //      to:    O1
1961   //      count: O2 treated as signed
1962   //
1963   address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) {
1964     __ align(CodeEntryAlignment);
1965     StubCodeMark mark(this, "StubRoutines", name);
1966     address start = __ pc();
1967 
1968     const Register count = O2;
1969     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1970 
1971     if (entry != NULL) {
1972       *entry = __ pc();
1973       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1974       BLOCK_COMMENT("Entry:");
1975     }
1976 
1977     generate_disjoint_int_copy_core(aligned);
1978 
1979     // O3, O4 are used as temp registers
1980     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
1981     __ retl();
1982     __ delayed()->mov(G0, O0); // return 0
1983     return start;
1984   }
1985 
1986   //
1987   //  Generate core code for conjoint int copy (and oop copy on 32-bit).
1988   //  If "aligned" is true, the "from" and "to" addresses are assumed
1989   //  to be heapword aligned.
1990   //
1991   // Arguments:
1992   //      from:  O0
1993   //      to:    O1
1994   //      count: O2 treated as signed
1995   //
1996   void generate_conjoint_int_copy_core(bool aligned) {
1997     // Do reverse copy.
1998 
1999     Label L_skip_alignment, L_aligned_copy;
2000     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
2001 
2002     const Register from      = O0;   // source array address
2003     const Register to        = O1;   // destination array address
2004     const Register count     = O2;   // elements count
2005     const Register end_from  = from; // source array end address
2006     const Register end_to    = to;   // destination array end address
2007     // O3, O4, O5, G3 are used as temp registers
2008 
2009     const Register byte_count = O3;  // bytes count to copy
2010 
2011       __ sllx(count, LogBytesPerInt, byte_count);
2012       __ add(to, byte_count, end_to); // offset after last copied element
2013 
2014       __ cmp(count, 5); // for short arrays, just do single element copy
2015       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
2016       __ delayed()->add(from, byte_count, end_from);
2017 
2018     // copy 1 element to align 'to' on an 8 byte boundary
2019       __ andcc(end_to, 7, G0);
2020       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
2021       __ delayed()->nop();
2022       __ dec(count);
2023       __ dec(end_from, 4);
2024       __ dec(end_to,   4);
2025       __ ld(end_from, 0, O4);
2026       __ st(O4, end_to, 0);
2027     __ BIND(L_skip_alignment);
2028 
2029     // Check if 'end_from' and 'end_to' have the same alignment.
2030       __ andcc(end_from, 7, G0);
2031       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
2032       __ delayed()->dec(count, 4); // The cmp at the start guarantees cnt >= 4
2033 
2034     // copy with shift 4 elements (16 bytes) at a time
2035     //
2036     // Load 2 aligned 8-byte chunks and use one from the previous iteration
2037     // to form 2 aligned 8-byte chunks to store.
2038     //
2039       __ ldx(end_from, -4, O3);
2040       __ align(OptoLoopAlignment);
2041     __ BIND(L_copy_16_bytes);
2042       __ ldx(end_from, -12, O4);
2043       __ deccc(count, 4);
2044       __ ldx(end_from, -20, O5);
2045       __ dec(end_to, 16);
2046       __ dec(end_from, 16);
2047       __ srlx(O3, 32, O3);
2048       __ sllx(O4, 32, G3);
2049       __ bset(G3, O3);
2050       __ stx(O3, end_to, 8);
2051       __ srlx(O4, 32, O4);
2052       __ sllx(O5, 32, G3);
2053       __ bset(O4, G3);
2054       __ stx(G3, end_to, 0);
2055       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2056       __ delayed()->mov(O5, O3);
2057 
2058       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2059       __ delayed()->inc(count, 4);
2060 
2061     // copy 4 elements (16 bytes) at a time
2062       __ align(OptoLoopAlignment);
2063     __ BIND(L_aligned_copy);
2064       __ dec(end_from, 16);
2065       __ ldx(end_from, 8, O3);
2066       __ ldx(end_from, 0, O4);
2067       __ dec(end_to, 16);
2068       __ deccc(count, 4);
2069       __ stx(O3, end_to, 8);
2070       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
2071       __ delayed()->stx(O4, end_to, 0);
2072       __ inc(count, 4);
2073 
2074     // copy 1 element (4 bytes) at a time
2075     __ BIND(L_copy_4_bytes);
2076       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
2077     __ BIND(L_copy_4_bytes_loop);
2078       __ dec(end_from, 4);
2079       __ dec(end_to, 4);
2080       __ ld(end_from, 0, O4);
2081       __ deccc(count);
2082       __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
2083       __ delayed()->st(O4, end_to, 0);
2084     __ BIND(L_exit);
2085   }
2086 
2087   //
2088   //  Generate stub for conjoint int copy.  If "aligned" is true, the
2089   //  "from" and "to" addresses are assumed to be heapword aligned.
2090   //
2091   // Arguments for generated stub:
2092   //      from:  O0
2093   //      to:    O1
2094   //      count: O2 treated as signed
2095   //
2096   address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
2097                                      address *entry, const char *name) {
2098     __ align(CodeEntryAlignment);
2099     StubCodeMark mark(this, "StubRoutines", name);
2100     address start = __ pc();
2101 
2102     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2103 
2104     if (entry != NULL) {
2105       *entry = __ pc();
2106       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2107       BLOCK_COMMENT("Entry:");
2108     }
2109 
2110     array_overlap_test(nooverlap_target, 2);
2111 
2112     generate_conjoint_int_copy_core(aligned);
2113 
2114     // O3, O4 are used as temp registers
2115     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2116     __ retl();
2117     __ delayed()->mov(G0, O0); // return 0
2118     return start;
2119   }
2120 
2121   //
2122   // Helper methods for generate_disjoint_long_copy_core()
2123   //
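       // copy_64_bytes_loop() below copies 64 bytes (8 longword elements) per
       // iteration as four pairs of 8-byte loads and stores, optionally
       // prefetching ahead on both streams and optionally using BIS
       // (block-initializing) stores so the destination lines need not be
       // read first.  The caller biases 'count', so the loop repeats while
       // the biased count stays non-negative after each 8-element decrement.
       //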
2124   void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
2125                           Label& L_loop, bool use_prefetch, bool use_bis) {
2126     __ align(OptoLoopAlignment);
2127     __ BIND(L_loop);
2128     for (int off = 0; off < 64; off += 16) {
2129       if (use_prefetch && (off & 31) == 0) {
2130         if (ArraycopySrcPrefetchDistance > 0) {
2131           __ prefetch(from, ArraycopySrcPrefetchDistance+off, Assembler::severalReads);
2132         }
2133         if (ArraycopyDstPrefetchDistance > 0) {
2134           __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads);
2135         }
2136       }
2137       __ ldx(from,  off+0, O4);
2138       __ ldx(from,  off+8, O5);
2139       if (use_bis) {
2140         __ stxa(O4, to,  off+0);
2141         __ stxa(O5, to,  off+8);
2142       } else {
2143         __ stx(O4, to,  off+0);
2144         __ stx(O5, to,  off+8);
2145       }
2146     }
2147     __ deccc(count, 8);
2148     __ inc(from, 64);
2149     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
2150     __ delayed()->inc(to, 64);
2151   }
2152 
2153   //
2154   //  Generate core code for disjoint long copy (and oop copy on 64-bit).
2155   //  "aligned" is ignored, because we must make the stronger
2156   //  assumption that both addresses are always 64-bit aligned.
2157   //
2158   // Arguments:
2159   //      from:  O0
2160   //      to:    O1
2161   //      count: O2 treated as signed
2162   //
2163   // count -= 2;
2164   // if ( count >= 0 ) { // >= 2 elements
2165   //   if ( count > 6) { // >= 8 elements
2166   //     count -= 6; // original count - 8
2167   //     do {
2168   //       copy_8_elements;
2169   //       count -= 8;
2170   //     } while ( count >= 0 );
2171   //     count += 6;
2172   //   }
2173   //   if ( count >= 0 ) { // >= 2 elements
2174   //     do {
2175   //       copy_2_elements;
2176   //     } while ( (count=count-2) >= 0 );
2177   //   }
2178   // }
2179   // count += 2;
2180   // if ( count != 0 ) { // 1 element left
2181   //   copy_1_element;
2182   // }
2183   //
2184   void generate_disjoint_long_copy_core(bool aligned) {
2185     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2186     const Register from    = O0;  // source array address
2187     const Register to      = O1;  // destination array address
2188     const Register count   = O2;  // elements count
2189     const Register offset0 = O4;  // element offset
2190     const Register offset8 = O5;  // next element offset
2191 
2192     __ deccc(count, 2);
2193     __ mov(G0, offset0);   // offset from start of arrays (0)
2194     __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2195     __ delayed()->add(offset0, 8, offset8);
2196 
2197     // Copy by 64 bytes chunks
2198 
2199     const Register from64 = O3;  // source address
2200     const Register to64   = G3;  // destination address
2201     __ subcc(count, 6, O3);
2202     __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
2203     __ delayed()->mov(to,   to64);
2204     // Now we can use O4(offset0), O5(offset8) as temps
2205     __ mov(O3, count);
2206     // count >= 0 (original count - 8)
2207     __ mov(from, from64);
2208 
2209     disjoint_copy_core(from64, to64, count, 3, 64, &StubGenerator::copy_64_bytes_loop);
2210 
2211       // Restore O4(offset0), O5(offset8)
2212       __ sub(from64, from, offset0);
2213       __ inccc(count, 6); // restore count
2214       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2215       __ delayed()->add(offset0, 8, offset8);
2216 
2217       // Copy by 16 bytes chunks
2218       __ align(OptoLoopAlignment);
2219     __ BIND(L_copy_16_bytes);
2220       __ ldx(from, offset0, O3);
2221       __ ldx(from, offset8, G3);
2222       __ deccc(count, 2);
2223       __ stx(O3, to, offset0);
2224       __ inc(offset0, 16);
2225       __ stx(G3, to, offset8);
2226       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2227       __ delayed()->inc(offset8, 16);
2228 
2229       // Copy last 8 bytes
2230     __ BIND(L_copy_8_bytes);
2231       __ inccc(count, 2);
2232       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
2233       __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
2234       __ ldx(from, offset0, O3);
2235       __ stx(O3, to, offset0);
2236     __ BIND(L_exit);
2237   }
2238 
2239   //
2240   //  Generate stub for disjoint long copy.
2241   //  "aligned" is ignored, because we must make the stronger
2242   //  assumption that both addresses are always 64-bit aligned.
2243   //
2244   // Arguments for generated stub:
2245   //      from:  O0
2246   //      to:    O1
2247   //      count: O2 treated as signed
2248   //
2249   address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) {
2250     __ align(CodeEntryAlignment);
2251     StubCodeMark mark(this, "StubRoutines", name);
2252     address start = __ pc();
2253 
2254     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2255 
2256     if (entry != NULL) {
2257       *entry = __ pc();
2258       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2259       BLOCK_COMMENT("Entry:");
2260     }
2261 
2262     generate_disjoint_long_copy_core(aligned);
2263 
2264     // O3, O4 are used as temp registers
2265     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2266     __ retl();
2267     __ delayed()->mov(G0, O0); // return 0
2268     return start;
2269   }
2270 
2271   //
2272   //  Generate core code for conjoint long copy (and oop copy on 64-bit).
2273   //  "aligned" is ignored, because we must make the stronger
2274   //  assumption that both addresses are always 64-bit aligned.
2275   //
2276   // Arguments:
2277   //      from:  O0
2278   //      to:    O1
2279   //      count: O2 treated as signed
2280   //
2281   void generate_conjoint_long_copy_core(bool aligned) {
2282     // Do reverse copy.
2283     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2284     const Register from    = O0;  // source array address
2285     const Register to      = O1;  // destination array address
2286     const Register count   = O2;  // elements count
2287     const Register offset8 = O4;  // element offset
2288     const Register offset0 = O5;  // previous element offset
2289 
2290       __ subcc(count, 1, count);
2291       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
2292       __ delayed()->sllx(count, LogBytesPerLong, offset8);
2293       __ sub(offset8, 8, offset0);
2294       __ align(OptoLoopAlignment);
2295     __ BIND(L_copy_16_bytes);
2296       __ ldx(from, offset8, O2);
2297       __ ldx(from, offset0, O3);
2298       __ stx(O2, to, offset8);
2299       __ deccc(offset8, 16);      // use offset8 as counter
2300       __ stx(O3, to, offset0);
2301       __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
2302       __ delayed()->dec(offset0, 16);
2303 
2304     __ BIND(L_copy_8_bytes);
2305       __ brx(Assembler::negative, false, Assembler::pn, L_exit );
2306       __ delayed()->nop();
2307       __ ldx(from, 0, O3);
2308       __ stx(O3, to, 0);
2309     __ BIND(L_exit);
2310   }
2311 
2312   //  Generate stub for conjoint long copy.
2313   //  "aligned" is ignored, because we must make the stronger
2314   //  assumption that both addresses are always 64-bit aligned.
2315   //
2316   // Arguments for generated stub:
2317   //      from:  O0
2318   //      to:    O1
2319   //      count: O2 treated as signed
2320   //
2321   address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2322                                       address *entry, const char *name) {
2323     __ align(CodeEntryAlignment);
2324     StubCodeMark mark(this, "StubRoutines", name);
2325     address start = __ pc();
2326 
2327     assert(aligned, "Should always be aligned");
2328 
2329     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2330 
2331     if (entry != NULL) {
2332       *entry = __ pc();
2333       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2334       BLOCK_COMMENT("Entry:");
2335     }
2336 
2337     array_overlap_test(nooverlap_target, 3);
2338 
2339     generate_conjoint_long_copy_core(aligned);
2340 
2341     // O3, O4 are used as temp registers
2342     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2343     __ retl();
2344     __ delayed()->mov(G0, O0); // return 0
2345     return start;
2346   }
2347 
2348   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
2349   //  "from" and "to" addresses are assumed to be heapword aligned.
2350   //
2351   // Arguments for generated stub:
2352   //      from:  O0
2353   //      to:    O1
2354   //      count: O2 treated as signed
2355   //
2356   address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
2357                                      bool dest_uninitialized = false) {
2358 
2359     const Register from  = O0;  // source array address
2360     const Register to    = O1;  // destination array address
2361     const Register count = O2;  // elements count
2362 
2363     __ align(CodeEntryAlignment);
2364     StubCodeMark mark(this, "StubRoutines", name);
2365     address start = __ pc();
2366 
2367     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2368 
2369     if (entry != NULL) {
2370       *entry = __ pc();
2371       // caller can pass a 64-bit byte count here
2372       BLOCK_COMMENT("Entry:");
2373     }
2374 
2375     // save arguments for barrier generation
2376     __ mov(to, G1);
2377     __ mov(count, G5);
2378     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
2379     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2380     if (UseCompressedOops) {
2381       generate_disjoint_int_copy_core(aligned);
2382     } else {
2383       generate_disjoint_long_copy_core(aligned);
2384     }
2385     // O0 is used as temp register
2386     gen_write_ref_array_post_barrier(G1, G5, O0);
2387 
2388     // O3, O4 are used as temp registers
2389     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2390     __ retl();
2391     __ delayed()->mov(G0, O0); // return 0
2392     return start;
2393   }
2394 
2395   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
2396   //  "from" and "to" addresses are assumed to be heapword aligned.
2397   //
2398   // Arguments for generated stub:
2399   //      from:  O0
2400   //      to:    O1
2401   //      count: O2 treated as signed
2402   //
2403   address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2404                                      address *entry, const char *name,
2405                                      bool dest_uninitialized = false) {
2406 
2407     const Register from  = O0;  // source array address
2408     const Register to    = O1;  // destination array address
2409     const Register count = O2;  // elements count
2410 
2411     __ align(CodeEntryAlignment);
2412     StubCodeMark mark(this, "StubRoutines", name);
2413     address start = __ pc();
2414 
2415     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2416 
2417     if (entry != NULL) {
2418       *entry = __ pc();
2419       // caller can pass a 64-bit byte count here
2420       BLOCK_COMMENT("Entry:");
2421     }
2422 
2423     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2424 
2425     // save arguments for barrier generation
2426     __ mov(to, G1);
2427     __ mov(count, G5);
2428     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
2429 
2430     if (UseCompressedOops) {
2431       generate_conjoint_int_copy_core(aligned);
2432     } else {
2433       generate_conjoint_long_copy_core(aligned);
2434     }
2435 
2436     // O0 is used as temp register
2437     gen_write_ref_array_post_barrier(G1, G5, O0);
2438 
2439     // O3, O4 are used as temp registers
2440     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2441     __ retl();
2442     __ delayed()->mov(G0, O0); // return 0
2443     return start;
2444   }
2445 
2446 
2447   // Helper for generating a dynamic type check.
2448   // Smashes only the given temp registers.
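       // In rough terms the check is (an illustrative sketch; the real work is
       // done by MacroAssembler::check_klass_subtype_fast_path() and
       // check_klass_subtype_slow_path()):
       //   succeed if sub_klass == super_klass, or if the word at
       //   (sub_klass + super_check_offset) equals super_klass;
       //   otherwise scan sub_klass's secondary supers under a saved frame,
       //   branching to L_success on a hit and falling through to the miss
       //   path otherwise.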
2449   void generate_type_check(Register sub_klass,
2450                            Register super_check_offset,
2451                            Register super_klass,
2452                            Register temp,
2453                            Label& L_success) {
2454     assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
2455 
2456     BLOCK_COMMENT("type_check:");
2457 
2458     Label L_miss, L_pop_to_miss;
2459 
2460     assert_clean_int(super_check_offset, temp);
2461 
2462     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
2463                                      &L_success, &L_miss, NULL,
2464                                      super_check_offset);
2465 
2466     BLOCK_COMMENT("type_check_slow_path:");
2467     __ save_frame(0);
2468     __ check_klass_subtype_slow_path(sub_klass->after_save(),
2469                                      super_klass->after_save(),
2470                                      L0, L1, L2, L4,
2471                                      NULL, &L_pop_to_miss);
2472     __ ba(L_success);
2473     __ delayed()->restore();
2474 
2475     __ bind(L_pop_to_miss);
2476     __ restore();
2477 
2478     // Fall through on failure!
2479     __ BIND(L_miss);
2480   }
2481 
2482 
2483   //  Generate stub for checked oop copy.
2484   //
2485   // Arguments for generated stub:
2486   //      from:  O0
2487   //      to:    O1
2488   //      count: O2 treated as signed
2489   //      ckoff: O3 (super_check_offset)
2490   //      ckval: O4 (super_klass)
2491   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
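       //             For example, if 3 of 10 elements are copied before a
       //             failing element, O0 comes back as (-1^3) == ~3 == -4 and
       //             the caller recovers the partial count as ~O0.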
2492   //
2493   address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) {
2494 
2495     const Register O0_from   = O0;      // source array address
2496     const Register O1_to     = O1;      // destination array address
2497     const Register O2_count  = O2;      // elements count
2498     const Register O3_ckoff  = O3;      // super_check_offset
2499     const Register O4_ckval  = O4;      // super_klass
2500 
2501     const Register O5_offset = O5;      // loop var, with stride wordSize
2502     const Register G1_remain = G1;      // loop var, with stride -1
2503     const Register G3_oop    = G3;      // actual oop copied
2504     const Register G4_klass  = G4;      // oop._klass
2505     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
2506 
2507     __ align(CodeEntryAlignment);
2508     StubCodeMark mark(this, "StubRoutines", name);
2509     address start = __ pc();
2510 
2511 #ifdef ASSERT
2512     // We sometimes save a frame (see generate_type_check below).
2513     // If this will cause trouble, let's fail now instead of later.
2514     __ save_frame(0);
2515     __ restore();
2516 #endif
2517 
2518     assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
2519 
2520 #ifdef ASSERT
2521     // caller guarantees that the arrays really are different
2522     // otherwise, we would have to make conjoint checks
2523     { Label L;
2524       __ mov(O3, G1);           // spill: overlap test smashes O3
2525       __ mov(O4, G4);           // spill: overlap test smashes O4
2526       array_overlap_test(L, LogBytesPerHeapOop);
2527       __ stop("checkcast_copy within a single array");
2528       __ bind(L);
2529       __ mov(G1, O3);
2530       __ mov(G4, O4);
2531     }
2532 #endif //ASSERT
2533 
2534     if (entry != NULL) {
2535       *entry = __ pc();
2536       // caller can pass a 64-bit byte count here (from generic stub)
2537       BLOCK_COMMENT("Entry:");
2538     }
2539     gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized);
2540 
2541     Label load_element, store_element, do_card_marks, fail, done;
2542     __ addcc(O2_count, 0, G1_remain);   // initialize loop index, and test it
2543     __ brx(Assembler::notZero, false, Assembler::pt, load_element);
2544     __ delayed()->mov(G0, O5_offset);   // offset from start of arrays
2545 
2546     // Empty array:  Nothing to do.
2547     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2548     __ retl();
2549     __ delayed()->set(0, O0);           // return 0 on (trivial) success
2550 
2551     // ======== begin loop ========
2552     // (Loop is rotated; its entry is load_element.)
2553     // Loop variables:
2554     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
2555     //   (G1 = len; G1 != 0; G1--) --- number of oops *remaining*
2556     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
2557     __ align(OptoLoopAlignment);
2558 
2559     __ BIND(store_element);
2560     __ deccc(G1_remain);                // decrement the count
2561     __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
2562     __ inc(O5_offset, heapOopSize);     // step to next offset
2563     __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
2564     __ delayed()->set(0, O0);           // return 0 on success
2565 
2566     // ======== loop entry is here ========
2567     __ BIND(load_element);
2568     __ load_heap_oop(O0_from, O5_offset, G3_oop);  // load the oop
2569     __ br_null_short(G3_oop, Assembler::pt, store_element);
2570 
2571     __ load_klass(G3_oop, G4_klass); // query the object klass
2572 
2573     generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
2574                         // branch to this on success:
2575                         store_element);
2576     // ======== end loop ========
2577 
2578     // It was a real error; we must depend on the caller to finish the job.
2579     // Register G1 has number of *remaining* oops, O2 number of *total* oops.
2580     // Emit GC store barriers for the oops we have copied (O2 minus G1),
2581     // and report their number to the caller.
2582     __ BIND(fail);
2583     __ subcc(O2_count, G1_remain, O2_count);
2584     __ brx(Assembler::zero, false, Assembler::pt, done);
2585     __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller
2586 
2587     __ BIND(do_card_marks);
2588     gen_write_ref_array_post_barrier(O1_to, O2_count, O3);   // store check on O1[0..O2]
2589 
2590     __ BIND(done);
2591     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2592     __ retl();
2593     __ delayed()->nop();             // return value in O0
2594 
2595     return start;
2596   }
2597 
2598 
2599   //  Generate 'unsafe' array copy stub
2600   //  Though just as safe as the other stubs, it takes an unscaled
2601   //  size_t argument instead of an element count.
2602   //
2603   // Arguments for generated stub:
2604   //      from:  O0
2605   //      to:    O1
2606   //      count: O2 byte count, treated as ssize_t, can be zero
2607   //
2608   // Examines the alignment of the operands and dispatches
2609   // to a long, int, short, or byte copy loop.
2610   //
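       // Dispatch logic, roughly (an illustrative sketch of the code below):
       //   bits = from | to | count;
       //   if ((bits & 7) == 0) { count >>= 3; goto long_copy;  }
       //   if ((bits & 3) == 0) { count >>= 2; goto int_copy;   }
       //   if ((bits & 1) == 0) { count >>= 1; goto short_copy; }
       //   goto byte_copy;
       //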
2611   address generate_unsafe_copy(const char* name,
2612                                address byte_copy_entry,
2613                                address short_copy_entry,
2614                                address int_copy_entry,
2615                                address long_copy_entry) {
2616 
2617     const Register O0_from   = O0;      // source array address
2618     const Register O1_to     = O1;      // destination array address
2619     const Register O2_count  = O2;      // elements count
2620 
2621     const Register G1_bits   = G1;      // test copy of low bits
2622 
2623     __ align(CodeEntryAlignment);
2624     StubCodeMark mark(this, "StubRoutines", name);
2625     address start = __ pc();
2626 
2627     // bump this on entry, not on exit:
2628     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
2629 
2630     __ or3(O0_from, O1_to, G1_bits);
2631     __ or3(O2_count,       G1_bits, G1_bits);
2632 
2633     __ btst(BytesPerLong-1, G1_bits);
2634     __ br(Assembler::zero, true, Assembler::pt,
2635           long_copy_entry, relocInfo::runtime_call_type);
2636     // scale the count on the way out:
2637     __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
2638 
2639     __ btst(BytesPerInt-1, G1_bits);
2640     __ br(Assembler::zero, true, Assembler::pt,
2641           int_copy_entry, relocInfo::runtime_call_type);
2642     // scale the count on the way out:
2643     __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
2644 
2645     __ btst(BytesPerShort-1, G1_bits);
2646     __ br(Assembler::zero, true, Assembler::pt,
2647           short_copy_entry, relocInfo::runtime_call_type);
2648     // scale the count on the way out:
2649     __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
2650 
2651     __ br(Assembler::always, false, Assembler::pt,
2652           byte_copy_entry, relocInfo::runtime_call_type);
2653     __ delayed()->nop();
2654 
2655     return start;
2656   }
2657 
2658 
2659   // Perform range checks on the proposed arraycopy.
2660   // Kills the two temps, but nothing else.
2661   // Also, clean the sign bits of src_pos and dst_pos.
2662   void arraycopy_range_checks(Register src,     // source array oop (O0)
2663                               Register src_pos, // source position (O1)
2664                               Register dst,     // destination array oop (O2)
2665                               Register dst_pos, // destination position (O3)
2666                               Register length,  // length of copy (O4)
2667                               Register temp1, Register temp2,
2668                               Label& L_failed) {
2669     BLOCK_COMMENT("arraycopy_range_checks:");
2670 
2671     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2672 
2673     const Register array_length = temp1;  // scratch
2674     const Register end_pos      = temp2;  // scratch
2675 
2676     // Note:  This next instruction may be in the delay slot of a branch:
2677     __ add(length, src_pos, end_pos);  // src_pos + length
2678     __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
2679     __ cmp(end_pos, array_length);
2680     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2681 
2682     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2683     __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
2684     __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
2685     __ cmp(end_pos, array_length);
2686     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2687 
2688     // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
2689     // Move with sign extension can be used since they are positive.
2690     __ delayed()->signx(src_pos, src_pos);
2691     __ signx(dst_pos, dst_pos);
2692 
2693     BLOCK_COMMENT("arraycopy_range_checks done");
2694   }
2695 
2696 
2697   //
2698   //  Generate generic array copy stubs
2699   //
2700   //  Input:
2701   //    O0    -  src oop
2702   //    O1    -  src_pos
2703   //    O2    -  dst oop
2704   //    O3    -  dst_pos
2705   //    O4    -  element count
2706   //
2707   //  Output:
2708   //    O0 ==  0  -  success
2709   //    O0 == -1  -  need to call System.arraycopy
2710   //
2711   address generate_generic_copy(const char *name,
2712                                 address entry_jbyte_arraycopy,
2713                                 address entry_jshort_arraycopy,
2714                                 address entry_jint_arraycopy,
2715                                 address entry_oop_arraycopy,
2716                                 address entry_jlong_arraycopy,
2717                                 address entry_checkcast_arraycopy) {
2718     Label L_failed, L_objArray;
2719 
2720     // Input registers
2721     const Register src      = O0;  // source array oop
2722     const Register src_pos  = O1;  // source position
2723     const Register dst      = O2;  // destination array oop
2724     const Register dst_pos  = O3;  // destination position
2725     const Register length   = O4;  // elements count
2726 
2727     // registers used as temp
2728     const Register G3_src_klass = G3; // source array klass
2729     const Register G4_dst_klass = G4; // destination array klass
2730     const Register G5_lh        = G5; // layout helper
2731     const Register O5_temp      = O5;
2732 
2733     __ align(CodeEntryAlignment);
2734     StubCodeMark mark(this, "StubRoutines", name);
2735     address start = __ pc();
2736 
2737     // bump this on entry, not on exit:
2738     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
2739 
2740     // In principle, the int arguments could be dirty.
2741     //assert_clean_int(src_pos, G1);
2742     //assert_clean_int(dst_pos, G1);
2743     //assert_clean_int(length, G1);
2744 
2745     //-----------------------------------------------------------------------
2746     // Assembler stubs will be used for this call to arraycopy
2747     // if the following conditions are met:
2748     //
2749     // (1) src and dst must not be null.
2750     // (2) src_pos must not be negative.
2751     // (3) dst_pos must not be negative.
2752     // (4) length  must not be negative.
2753     // (5) src klass and dst klass should be the same and not NULL.
2754     // (6) src and dst should be arrays.
2755     // (7) src_pos + length must not exceed length of src.
2756     // (8) dst_pos + length must not exceed length of dst.
2757     BLOCK_COMMENT("arraycopy initial argument checks");
2758 
2759     //  if (src == NULL) return -1;
2760     __ br_null(src, false, Assembler::pn, L_failed);
2761 
2762     //  if (src_pos < 0) return -1;
2763     __ delayed()->tst(src_pos);
2764     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2765     __ delayed()->nop();
2766 
2767     //  if (dst == NULL) return -1;
2768     __ br_null(dst, false, Assembler::pn, L_failed);
2769 
2770     //  if (dst_pos < 0) return -1;
2771     __ delayed()->tst(dst_pos);
2772     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2773 
2774     //  if (length < 0) return -1;
2775     __ delayed()->tst(length);
2776     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2777 
2778     BLOCK_COMMENT("arraycopy argument klass checks");
2779     //  get src->klass()
2780     if (UseCompressedClassPointers) {
2781       __ delayed()->nop(); // ??? not good
2782       __ load_klass(src, G3_src_klass);
2783     } else {
2784       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
2785     }
2786 
2787 #ifdef ASSERT
2788     //  assert(src->klass() != NULL);
2789     BLOCK_COMMENT("assert klasses not null");
2790     { Label L_a, L_b;
2791       __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL
2792       __ bind(L_a);
2793       __ stop("broken null klass");
2794       __ bind(L_b);
2795       __ load_klass(dst, G4_dst_klass);
2796       __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
2797       __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
2798       BLOCK_COMMENT("assert done");
2799     }
2800 #endif
2801 
2802     // Load layout helper
2803     //
2804     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2805     // 32        30    24            16              8     2                 0
2806     //
2807     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2808     //
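         //   For example (illustrative), an int[] decodes as
         //     array_tag = 0x3 (typeArray), element_type = T_INT,
         //     log2_element_size = 2, header_size = the array base offset.
         //   The code below extracts header_size and log2_element_size with the
         //   Klass::_lh_header_size_shift/_mask and _lh_log2_element_size_mask
         //   constants.
         //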
2809 
2810     int lh_offset = in_bytes(Klass::layout_helper_offset());
2811 
2812     // Load 32-bit signed value. Use br() instruction with it to check icc.
2813     __ lduw(G3_src_klass, lh_offset, G5_lh);
2814 
2815     if (UseCompressedClassPointers) {
2816       __ load_klass(dst, G4_dst_klass);
2817     }
2818     // Handle objArrays completely differently...
2819     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2820     __ set(objArray_lh, O5_temp);
2821     __ cmp(G5_lh,       O5_temp);
2822     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
2823     if (UseCompressedClassPointers) {
2824       __ delayed()->nop();
2825     } else {
2826       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
2827     }
2828 
2829     //  if (src->klass() != dst->klass()) return -1;
2830     __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed);
2831 
2832     //  if (!src->is_Array()) return -1;
2833     __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
2834     __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
2835 
2836     // At this point, it is known to be a typeArray (array_tag 0x3).
2837 #ifdef ASSERT
2838     __ delayed()->nop();
2839     { Label L;
2840       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2841       __ set(lh_prim_tag_in_place, O5_temp);
2842       __ cmp(G5_lh,                O5_temp);
2843       __ br(Assembler::greaterEqual, false, Assembler::pt, L);
2844       __ delayed()->nop();
2845       __ stop("must be a primitive array");
2846       __ bind(L);
2847     }
2848 #else
2849     __ delayed();                               // match next insn to prev branch
2850 #endif
2851 
2852     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2853                            O5_temp, G4_dst_klass, L_failed);
2854 
2855     // TypeArrayKlass
2856     //
2857     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2858     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2859     //
2860 
2861     const Register G4_offset = G4_dst_klass;    // array offset
2862     const Register G3_elsize = G3_src_klass;    // log2 element size
2863 
2864     __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
2865     __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
2866     __ add(src, G4_offset, src);       // src array offset
2867     __ add(dst, G4_offset, dst);       // dst array offset
2868     __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
2869 
2870     // next registers should be set before the jump to corresponding stub
2871     const Register from     = O0;  // source array address
2872     const Register to       = O1;  // destination array address
2873     const Register count    = O2;  // elements count
2874 
2875     // 'from', 'to', 'count' registers should be set in this order
2876     // since they are the same as 'src', 'src_pos', 'dst'.
2877 
2878     BLOCK_COMMENT("scale indexes to element size");
2879     __ sll_ptr(src_pos, G3_elsize, src_pos);
2880     __ sll_ptr(dst_pos, G3_elsize, dst_pos);
2881     __ add(src, src_pos, from);       // src_addr
2882     __ add(dst, dst_pos, to);         // dst_addr
2883 
2884     BLOCK_COMMENT("choose copy loop based on element size");
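         // G3_elsize holds log2(element size): 0 -> jbyte, 1 -> jshort, 2 -> jint, 3 -> jlong stub.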
2885     __ cmp(G3_elsize, 0);
2886     __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy);
2887     __ delayed()->signx(length, count); // length
2888 
2889     __ cmp(G3_elsize, LogBytesPerShort);
2890     __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy);
2891     __ delayed()->signx(length, count); // length
2892 
2893     __ cmp(G3_elsize, LogBytesPerInt);
2894     __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy);
2895     __ delayed()->signx(length, count); // length
2896 #ifdef ASSERT
2897     { Label L;
2898       __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L);
2899       __ stop("must be long copy, but elsize is wrong");
2900       __ bind(L);
2901     }
2902 #endif
2903     __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy);
2904     __ delayed()->signx(length, count); // length
2905 
2906     // ObjArrayKlass
2907   __ BIND(L_objArray);
2908     // live at this point:  G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
2909 
2910     Label L_plain_copy, L_checkcast_copy;
2911     //  test array classes for subtyping
2912     __ cmp(G3_src_klass, G4_dst_klass);         // usual case is exact equality
2913     __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
2914     __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
2915 
2916     // Identically typed arrays can be copied without element-wise checks.
2917     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2918                            O5_temp, G5_lh, L_failed);
2919 
2920     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2921     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2922     __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2923     __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2924     __ add(src, src_pos, from);       // src_addr
2925     __ add(dst, dst_pos, to);         // dst_addr
2926   __ BIND(L_plain_copy);
2927     __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy);
2928     __ delayed()->signx(length, count); // length
2929 
2930   __ BIND(L_checkcast_copy);
2931     // live at this point:  G3_src_klass, G4_dst_klass
2932     {
2933       // Before looking at dst.length, make sure dst is also an objArray.
2934       // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
2935       __ cmp(G5_lh,                    O5_temp);
2936       __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
2937 
2938       // It is safe to examine both src.length and dst.length.
2939       __ delayed();                             // match next insn to prev branch
2940       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2941                              O5_temp, G5_lh, L_failed);
2942 
2943       // Marshal the base address arguments now, freeing registers.
2944       __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2945       __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2946       __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2947       __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2948       __ add(src, src_pos, from);               // src_addr
2949       __ add(dst, dst_pos, to);                 // dst_addr
2950       __ signx(length, count);                  // length (reloaded)
2951 
2952       Register sco_temp = O3;                   // this register is free now
2953       assert_different_registers(from, to, count, sco_temp,
2954                                  G4_dst_klass, G3_src_klass);
2955 
2956       // Generate the type check.
2957       int sco_offset = in_bytes(Klass::super_check_offset_offset());
2958       __ lduw(G4_dst_klass, sco_offset, sco_temp);
2959       generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
2960                           O5_temp, L_plain_copy);
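           // If the source array type is a subtype of the destination array type the
           // check above branches to L_plain_copy (no per-element checks are needed);
           // otherwise we fall through and set up the element-wise checkcast copy.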
2961 
2962       // Fetch destination element klass from the ObjArrayKlass header.
2963       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2964 
2965       // the checkcast_copy loop needs two extra arguments:
2966       __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
2967       // lduw(O4, sco_offset, O3);              // sco of elem klass
2968 
2969       __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
2970       __ delayed()->lduw(O4, sco_offset, O3);
2971     }
2972 
2973   __ BIND(L_failed);
2974     __ retl();
2975     __ delayed()->sub(G0, 1, O0); // return -1
2976     return start;
2977   }
2978 
2979   //
2980   //  Generate stub for heap zeroing.
2981   //  "to" address is aligned to jlong (8 bytes).
2982   //
2983   // Arguments for generated stub:
2984   //      to:    O0
2985   //      count: O1 treated as signed (count of HeapWords)
2986   //             count could be 0
2987   //
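       // Note: when UseBlockZeroing is enabled this stub is installed as
       // StubRoutines::_zero_aligned_words (see generate_arraycopy_stubs below);
       // per the note above, a count of 0 is legal (nothing is zeroed).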
2988   address generate_zero_aligned_words(const char* name) {
2989     __ align(CodeEntryAlignment);
2990     StubCodeMark mark(this, "StubRoutines", name);
2991     address start = __ pc();
2992 
2993     const Register to    = O0;   // destination address
2994     const Register count = O1;   // HeapWords count
2995     const Register temp  = O2;   // scratch
2996 
2997     Label Ldone;
2998     __ sllx(count, LogHeapWordSize, count); // to bytes count
2999     // Use BIS for zeroing
3000     __ bis_zeroing(to, count, temp, Ldone);
3001     __ bind(Ldone);
3002     __ retl();
3003     __ delayed()->nop();
3004     return start;
3005   }
3006 
3007   void generate_arraycopy_stubs() {
3008     address entry;
3009     address entry_jbyte_arraycopy;
3010     address entry_jshort_arraycopy;
3011     address entry_jint_arraycopy;
3012     address entry_oop_arraycopy;
3013     address entry_jlong_arraycopy;
3014     address entry_checkcast_arraycopy;
3015 
3016     //*** jbyte
3017     // Always need aligned and unaligned versions
3018     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
3019                                                                                   "jbyte_disjoint_arraycopy");
3020     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
3021                                                                                   &entry_jbyte_arraycopy,
3022                                                                                   "jbyte_arraycopy");
3023     StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
3024                                                                                   "arrayof_jbyte_disjoint_arraycopy");
3025     StubRoutines::_arrayof_jbyte_arraycopy          = generate_conjoint_byte_copy(true, entry, NULL,
3026                                                                                   "arrayof_jbyte_arraycopy");
3027 
3028     //*** jshort
3029     // Always need aligned and unaligned versions
3030     StubRoutines::_jshort_disjoint_arraycopy         = generate_disjoint_short_copy(false, &entry,
3031                                                                                     "jshort_disjoint_arraycopy");
3032     StubRoutines::_jshort_arraycopy                  = generate_conjoint_short_copy(false, entry,
3033                                                                                     &entry_jshort_arraycopy,
3034                                                                                     "jshort_arraycopy");
3035     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
3036                                                                                     "arrayof_jshort_disjoint_arraycopy");
3037     StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
3038                                                                                     "arrayof_jshort_arraycopy");
3039 
3040     //*** jint
3041     // Aligned versions
3042     StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
3043                                                                                 "arrayof_jint_disjoint_arraycopy");
3044     StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
3045                                                                                 "arrayof_jint_arraycopy");
3046     // In 64 bit we need both aligned and unaligned versions of jint arraycopy.
3047     // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
3048     StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
3049                                                                                 "jint_disjoint_arraycopy");
3050     StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
3051                                                                                 &entry_jint_arraycopy,
3052                                                                                 "jint_arraycopy");
3053 
3054     //*** jlong
3055     // It is always aligned
3056     StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
3057                                                                                   "arrayof_jlong_disjoint_arraycopy");
3058     StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
3059                                                                                   "arrayof_jlong_arraycopy");
3060     StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
3061     StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;
3062 
3063 
3064     //*** oops
3065     // Aligned versions
3066     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
3067                                                                                       "arrayof_oop_disjoint_arraycopy");
3068     StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
3069                                                                                       "arrayof_oop_arraycopy");
3070     // Aligned versions without pre-barriers
3071     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry,
3072                                                                                       "arrayof_oop_disjoint_arraycopy_uninit",
3073                                                                                       /*dest_uninitialized*/true);
3074     StubRoutines::_arrayof_oop_arraycopy_uninit          = generate_conjoint_oop_copy(true, entry, NULL,
3075                                                                                       "arrayof_oop_arraycopy_uninit",
3076                                                                                       /*dest_uninitialized*/true);
3077     if (UseCompressedOops) {
3078       // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
3079       StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
3080                                                                                     "oop_disjoint_arraycopy");
3081       StubRoutines::_oop_arraycopy                     = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
3082                                                                                     "oop_arraycopy");
3083       // Unaligned versions without pre-barriers
3084       StubRoutines::_oop_disjoint_arraycopy_uninit     = generate_disjoint_oop_copy(false, &entry,
3085                                                                                     "oop_disjoint_arraycopy_uninit",
3086                                                                                     /*dest_uninitialized*/true);
3087       StubRoutines::_oop_arraycopy_uninit              = generate_conjoint_oop_copy(false, entry, NULL,
3088                                                                                     "oop_arraycopy_uninit",
3089                                                                                     /*dest_uninitialized*/true);
3090     } else
3091     {
3092       // oop arraycopy is always aligned on 32-bit and 64-bit without compressed oops
3093       StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
3094       StubRoutines::_oop_arraycopy                     = StubRoutines::_arrayof_oop_arraycopy;
3095       StubRoutines::_oop_disjoint_arraycopy_uninit     = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit;
3096       StubRoutines::_oop_arraycopy_uninit              = StubRoutines::_arrayof_oop_arraycopy_uninit;
3097     }
3098 
3099     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
3100     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
3101                                                                         /*dest_uninitialized*/true);
3102 
3103     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
3104                                                               entry_jbyte_arraycopy,
3105                                                               entry_jshort_arraycopy,
3106                                                               entry_jint_arraycopy,
3107                                                               entry_jlong_arraycopy);
3108     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
3109                                                                entry_jbyte_arraycopy,
3110                                                                entry_jshort_arraycopy,
3111                                                                entry_jint_arraycopy,
3112                                                                entry_oop_arraycopy,
3113                                                                entry_jlong_arraycopy,
3114                                                                entry_checkcast_arraycopy);
3115 
3116     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
3117     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
3118     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
3119     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
3120     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3121     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
3122 
3123     if (UseBlockZeroing) {
3124       StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words");
3125     }
3126   }
3127 
3128   address generate_aescrypt_encryptBlock() {
3129     // required since we read the expanded key 'int' array starting at its first element without alignment considerations
3130     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
3131            "the following code assumes that first element of an int array is aligned to 8 bytes");
3132     __ align(CodeEntryAlignment);
3133     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
3134     Label L_load_misaligned_input, L_load_expanded_key, L_doLast128bit, L_storeOutput, L_store_misaligned_output;
3135     address start = __ pc();
3136     Register from = O0; // source byte array
3137     Register to = O1;   // destination byte array
3138     Register key = O2;  // expanded key array
3139     const Register keylen = O4; // reg for storing expanded key array length
3140 
3141     // read expanded key length
3142     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
3143 
3144     // Method to address arbitrary alignment for load instructions:
3145     // Check the last 3 bits of the 'from' address to see if it is aligned to an 8-byte boundary.
3146     // If zero/aligned, continue with double FP load instructions.
3147     // If non-zero/misaligned, alignaddr sets GSR.align to the number of bytes to skip during faligndata.
3148     // alignaddr also rounds the arbitrarily aligned 'from' address down to the nearest 8-byte aligned address.
3149     // Load 3 * 8-byte components (to read the 16 input bytes) into 3 different FP regs starting at this aligned address;
3150     // faligndata then extracts (based on the GSR.align value) the appropriate 8 bytes from each pair of source regs.
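         // Worked example (illustrative): if 'from' is 0x1003, alignaddr rounds it down to
         // 0x1000 and records an offset of 3 in GSR.align; the three 8-byte loads cover
         // 0x1000..0x1017, and the two faligndata ops extract the 16 input bytes that start
         // at the original address 0x1003.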
3151 
3152     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3153     __ andcc(from, 7, G0);
3154     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
3155     __ delayed()->alignaddr(from, G0, from);
3156 
3157     // aligned case: load input into F54-F56
3158     __ ldf(FloatRegisterImpl::D, from, 0, F54);
3159     __ ldf(FloatRegisterImpl::D, from, 8, F56);
3160     __ ba_short(L_load_expanded_key);
3161 
3162     __ BIND(L_load_misaligned_input);
3163     __ ldf(FloatRegisterImpl::D, from, 0, F54);
3164     __ ldf(FloatRegisterImpl::D, from, 8, F56);
3165     __ ldf(FloatRegisterImpl::D, from, 16, F58);
3166     __ faligndata(F54, F56, F54);
3167     __ faligndata(F56, F58, F56);
3168 
3169     __ BIND(L_load_expanded_key);
3170     // Since we load the expanded key buffer starting at its first element, 8-byte alignment is guaranteed
3171     for ( int i = 0;  i <= 38; i += 2 ) {
3172       __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i));
3173     }
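         // F0..F38 now hold the first 40 expanded-key words (round keys 0 through 9); any
         // remaining round-key words are loaded below once the original key length is known.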
3174 
3175     // perform cipher transformation
3176     __ fxor(FloatRegisterImpl::D, F0, F54, F54);
3177     __ fxor(FloatRegisterImpl::D, F2, F56, F56);
3178     // rounds 1 through 8
3179     for ( int i = 4;  i <= 28; i += 8 ) {
3180       __ aes_eround01(as_FloatRegister(i), F54, F56, F58);
3181       __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60);
3182       __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54);
3183       __ aes_eround23(as_FloatRegister(i+6), F58, F60, F56);
3184     }
3185     __ aes_eround01(F36, F54, F56, F58); //round 9
3186     __ aes_eround23(F38, F54, F56, F60);
3187 
3188     // 128-bit original key size
3189     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit);
3190 
3191     for ( int i = 40;  i <= 50; i += 2 ) {
3192       __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i) );
3193     }
3194     __ aes_eround01(F40, F58, F60, F54); //round 10
3195     __ aes_eround23(F42, F58, F60, F56);
3196     __ aes_eround01(F44, F54, F56, F58); //round 11
3197     __ aes_eround23(F46, F54, F56, F60);
3198 
3199     // 192-bit original key size
3200     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput);
3201 
3202     __ ldf(FloatRegisterImpl::D, key, 208, F52);
3203     __ aes_eround01(F48, F58, F60, F54); //round 12
3204     __ aes_eround23(F50, F58, F60, F56);
3205     __ ldf(FloatRegisterImpl::D, key, 216, F46);
3206     __ ldf(FloatRegisterImpl::D, key, 224, F48);
3207     __ ldf(FloatRegisterImpl::D, key, 232, F50);
3208     __ aes_eround01(F52, F54, F56, F58); //round 13
3209     __ aes_eround23(F46, F54, F56, F60);
3210     __ ba_short(L_storeOutput);
3211 
3212     __ BIND(L_doLast128bit);
3213     __ ldf(FloatRegisterImpl::D, key, 160, F48);
3214     __ ldf(FloatRegisterImpl::D, key, 168, F50);
3215 
3216     __ BIND(L_storeOutput);
3217     // perform last round of encryption common for all key sizes
3218     __ aes_eround01_l(F48, F58, F60, F54); //last round
3219     __ aes_eround23_l(F50, F58, F60, F56);
3220 
3221     // Method to address arbitrary alignment for store instructions:
3222     // Check the last 3 bits of the 'dest' address to see if it is aligned to an 8-byte boundary.
3223     // If zero/aligned, continue with double FP store instructions.
3224     // If non-zero/misaligned, edge8n generates an edge mask in the result reg (O3 in the case below).
3225     // Example: if the dest address is 0x07 and the nearest 8-byte aligned address is 0x00, the edge mask is 00000001.
3226     // Compute (8-n), where n is the number of bytes skipped by the partial store (stpartialf) under the edge mask; n=7 in this case.
3227     // We get the value of n from the andcc that checks 'dest' alignment; n is available in O5 in the case below.
3228     // Set GSR.align to (8-n) using alignaddr.
3229     // Circularly byte-shift the store values by n places so that the original bytes are at the correct position for stpartialf.
3230     // Round the arbitrarily aligned 'dest' address down to the nearest 8-byte aligned address.
3231     // Partially store the original first (8-n) bytes starting at the original 'dest' address.
3232     // Negate the edge mask so that the subsequent stpartialf stores the original (8-n+1)th through 8th bytes at the appropriate address.
3233     // We need to execute this process for both the 8-byte result values.
3234 
3235     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3236     __ andcc(to, 7, O5);
3237     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
3238     __ delayed()->edge8n(to, G0, O3);
3239 
3240     // aligned case: store output into the destination array
3241     __ stf(FloatRegisterImpl::D, F54, to, 0);
3242     __ retl();
3243     __ delayed()->stf(FloatRegisterImpl::D, F56, to, 8);
3244 
3245     __ BIND(L_store_misaligned_output);
3246     __ add(to, 8, O4);
3247     __ mov(8, O2);
3248     __ sub(O2, O5, O2);
3249     __ alignaddr(O2, G0, O2);
3250     __ faligndata(F54, F54, F54);
3251     __ faligndata(F56, F56, F56);
3252     __ and3(to, -8, to);
3253     __ and3(O4, -8, O4);
3254     __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
3255     __ stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
3256     __ add(to, 8, to);
3257     __ add(O4, 8, O4);
3258     __ orn(G0, O3, O3);
3259     __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
3260     __ retl();
3261     __ delayed()->stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
3262 
3263     return start;
3264   }
3265 
3266   address generate_aescrypt_decryptBlock() {
3267     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
3268            "the following code assumes that first element of an int array is aligned to 8 bytes");
3269     // required since we also read the original key 'byte' array in the decryption stubs
3270     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
3271            "the following code assumes that first element of a byte array is aligned to 8 bytes");
3272     __ align(CodeEntryAlignment);
3273     StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
3274     address start = __ pc();
3275     Label L_load_misaligned_input, L_load_original_key, L_expand192bit, L_expand256bit, L_reload_misaligned_input;
3276     Label L_256bit_transform, L_common_transform, L_store_misaligned_output;
3277     Register from = O0; // source byte array
3278     Register to = O1;   // destination byte array
3279     Register key = O2;  // expanded key array
3280     Register original_key = O3;  // original key array only required during decryption
3281     const Register keylen = O4;  // reg for storing expanded key array length
3282 
3283     // read expanded key array length
3284     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
3285 
3286     // save 'from' since we may need to recheck alignment in case of 256-bit decryption
3287     __ mov(from, G1);
3288 
3289     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3290     __ andcc(from, 7, G0);
3291     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
3292     __ delayed()->alignaddr(from, G0, from);
3293 
3294     // aligned case: load input into F52-F54
3295     __ ldf(FloatRegisterImpl::D, from, 0, F52);
3296     __ ldf(FloatRegisterImpl::D, from, 8, F54);
3297     __ ba_short(L_load_original_key);
3298 
3299     __ BIND(L_load_misaligned_input);
3300     __ ldf(FloatRegisterImpl::D, from, 0, F52);
3301     __ ldf(FloatRegisterImpl::D, from, 8, F54);
3302     __ ldf(FloatRegisterImpl::D, from, 16, F56);
3303     __ faligndata(F52, F54, F52);
3304     __ faligndata(F54, F56, F54);
3305 
3306     __ BIND(L_load_original_key);
3307     // load original key from SunJCE expanded decryption key
3308     // Since we load the original key buffer starting at its first element, 8-byte alignment is guaranteed
3309     for ( int i = 0;  i <= 3; i++ ) {
3310       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3311     }
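         // F0..F3 now hold the first 128 bits of the original key; the extra words needed for
         // 192- and 256-bit keys are loaded at L_expand192bit / L_expand256bit below.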
3312 
3313     // 256-bit original key size
3314     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
3315 
3316     // 192-bit original key size
3317     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
3318 
3319     // 128-bit original key size
3320     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3321     for ( int i = 0;  i <= 36; i += 4 ) {
3322       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
3323       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
3324     }
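         // Each iteration above derives 4 more key words via aes_kexpand1/aes_kexpand2,
         // leaving the full 44-word expanded 128-bit key in F0..F42.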
3325 
3326     // perform 128-bit key specific inverse cipher transformation
3327     __ fxor(FloatRegisterImpl::D, F42, F54, F54);
3328     __ fxor(FloatRegisterImpl::D, F40, F52, F52);
3329     __ ba_short(L_common_transform);
3330 
3331     __ BIND(L_expand192bit);
3332 
3333     // start loading rest of the 192-bit key
3334     __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
3335     __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
3336 
3337     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3338     for ( int i = 0;  i <= 36; i += 6 ) {
3339       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
3340       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
3341       __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
3342     }
3343     __ aes_kexpand1(F42, F46, 7, F48);
3344     __ aes_kexpand2(F44, F48, F50);
3345 
3346     // perform 192-bit key specific inverse cipher transformation
3347     __ fxor(FloatRegisterImpl::D, F50, F54, F54);
3348     __ fxor(FloatRegisterImpl::D, F48, F52, F52);
3349     __ aes_dround23(F46, F52, F54, F58);
3350     __ aes_dround01(F44, F52, F54, F56);
3351     __ aes_dround23(F42, F56, F58, F54);
3352     __ aes_dround01(F40, F56, F58, F52);
3353     __ ba_short(L_common_transform);
3354 
3355     __ BIND(L_expand256bit);
3356 
3357     // load rest of the 256-bit key
3358     for ( int i = 4;  i <= 7; i++ ) {
3359       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3360     }
3361 
3362     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3363     for ( int i = 0;  i <= 40; i += 8 ) {
3364       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
3365       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
3366       __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
3367       __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
3368     }
3369     __ aes_kexpand1(F48, F54, 6, F56);
3370     __ aes_kexpand2(F50, F56, F58);
3371 
3372     for ( int i = 0;  i <= 6; i += 2 ) {
3373       __ fsrc2(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i));
3374     }
3375 
3376     // reload original 'from' address
3377     __ mov(G1, from);
3378 
3379     // re-check 8-byte alignment
3380     __ andcc(from, 7, G0);
3381     __ br(Assembler::notZero, true, Assembler::pn, L_reload_misaligned_input);
3382     __ delayed()->alignaddr(from, G0, from);
3383 
3384     // aligned case: load input into F52-F54
3385     __ ldf(FloatRegisterImpl::D, from, 0, F52);
3386     __ ldf(FloatRegisterImpl::D, from, 8, F54);
3387     __ ba_short(L_256bit_transform);
3388 
3389     __ BIND(L_reload_misaligned_input);
3390     __ ldf(FloatRegisterImpl::D, from, 0, F52);
3391     __ ldf(FloatRegisterImpl::D, from, 8, F54);
3392     __ ldf(FloatRegisterImpl::D, from, 16, F56);
3393     __ faligndata(F52, F54, F52);
3394     __ faligndata(F54, F56, F54);
3395 
3396     // perform 256-bit key specific inverse cipher transformation
3397     __ BIND(L_256bit_transform);
3398     __ fxor(FloatRegisterImpl::D, F0, F54, F54);
3399     __ fxor(FloatRegisterImpl::D, F2, F52, F52);
3400     __ aes_dround23(F4, F52, F54, F58);
3401     __ aes_dround01(F6, F52, F54, F56);
3402     __ aes_dround23(F50, F56, F58, F54);
3403     __ aes_dround01(F48, F56, F58, F52);
3404     __ aes_dround23(F46, F52, F54, F58);
3405     __ aes_dround01(F44, F52, F54, F56);
3406     __ aes_dround23(F42, F56, F58, F54);
3407     __ aes_dround01(F40, F56, F58, F52);
3408 
3409     for ( int i = 0;  i <= 7; i++ ) {
3410       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3411     }
3412 
3413     // perform inverse cipher transformations common for all key sizes
3414     __ BIND(L_common_transform);
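         // Walk the round keys downward (F38/F36 ... F2/F0), two inverse rounds per
         // iteration; the final pair uses the aes_dround*_l 'last round' forms.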
3415     for ( int i = 38;  i >= 6; i -= 8 ) {
3416       __ aes_dround23(as_FloatRegister(i), F52, F54, F58);
3417       __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56);
3418       if ( i != 6) {
3419         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54);
3420         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52);
3421       } else {
3422         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54);
3423         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52);
3424       }
3425     }
3426 
3427     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3428     __ andcc(to, 7, O5);
3429     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
3430     __ delayed()->edge8n(to, G0, O3);
3431 
3432     // aligned case: store output into the destination array
3433     __ stf(FloatRegisterImpl::D, F52, to, 0);
3434     __ retl();
3435     __ delayed()->stf(FloatRegisterImpl::D, F54, to, 8);
3436 
3437     __ BIND(L_store_misaligned_output);
3438     __ add(to, 8, O4);
3439     __ mov(8, O2);
3440     __ sub(O2, O5, O2);
3441     __ alignaddr(O2, G0, O2);
3442     __ faligndata(F52, F52, F52);
3443     __ faligndata(F54, F54, F54);
3444     __ and3(to, -8, to);
3445     __ and3(O4, -8, O4);
3446     __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY);
3447     __ stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY);
3448     __ add(to, 8, to);
3449     __ add(O4, 8, O4);
3450     __ orn(G0, O3, O3);
3451     __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY);
3452     __ retl();
3453     __ delayed()->stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY);
3454 
3455     return start;
3456   }
3457 
3458   address generate_cipherBlockChaining_encryptAESCrypt() {
3459     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
3460            "the following code assumes that first element of an int array is aligned to 8 bytes");
3461     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
3462            "the following code assumes that first element of a byte array is aligned to 8 bytes");
3463     __ align(CodeEntryAlignment);
3464     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
3465     Label L_cbcenc128, L_load_misaligned_input_128bit, L_128bit_transform, L_store_misaligned_output_128bit;
3466     Label L_check_loop_end_128bit, L_cbcenc192, L_load_misaligned_input_192bit, L_192bit_transform;
3467     Label L_store_misaligned_output_192bit, L_check_loop_end_192bit, L_cbcenc256, L_load_misaligned_input_256bit;
3468     Label L_256bit_transform, L_store_misaligned_output_256bit, L_check_loop_end_256bit;
3469     address start = __ pc();
3470     Register from = I0; // source byte array
3471     Register to = I1;   // destination byte array
3472     Register key = I2;  // expanded key array
3473     Register rvec = I3; // init vector
3474     const Register len_reg = I4; // cipher length
3475     const Register keylen = I5;  // reg for storing expanded key array length
3476 
3477     __ save_frame(0);
3478     // save cipher len to return in the end
3479     __ mov(len_reg, L0);
3480 
3481     // read expanded key length
3482     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
3483 
3484     // load initial vector, 8-byte alignment is guaranteed
3485     __ ldf(FloatRegisterImpl::D, rvec, 0, F60);
3486     __ ldf(FloatRegisterImpl::D, rvec, 8, F62);
3487     // load key, 8-byte alignment is guaranteed
3488     __ ldx(key,0,G1);
3489     __ ldx(key,8,G5);
3490 
3491     // start loading expanded key, 8-byte alignment is guaranteed
3492     for ( int i = 0, j = 16;  i <= 38; i += 2, j += 8 ) {
3493       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
3494     }
3495 
3496     // 128-bit original key size
3497     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128);
3498 
3499     for ( int i = 40, j = 176;  i <= 46; i += 2, j += 8 ) {
3500       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
3501     }
3502 
3503     // 192-bit original key size
3504     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192);
3505 
3506     for ( int i = 48, j = 208;  i <= 54; i += 2, j += 8 ) {
3507       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
3508     }
3509 
3510     // 256-bit original key size
3511     __ ba_short(L_cbcenc256);
3512 
3513     __ align(OptoLoopAlignment);
3514     __ BIND(L_cbcenc128);
3515     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3516     __ andcc(from, 7, G0);
3517     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_128bit);
3518     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
3519 
3520     // aligned case: load input into G3 and G4
3521     __ ldx(from,0,G3);
3522     __ ldx(from,8,G4);
3523     __ ba_short(L_128bit_transform);
3524 
3525     __ BIND(L_load_misaligned_input_128bit);
3526     // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption
3527     __ alignaddr(from, G0, from);
3528     __ ldf(FloatRegisterImpl::D, from, 0, F48);
3529     __ ldf(FloatRegisterImpl::D, from, 8, F50);
3530     __ ldf(FloatRegisterImpl::D, from, 16, F52);
3531     __ faligndata(F48, F50, F48);
3532     __ faligndata(F50, F52, F50);
3533     __ movdtox(F48, G3);
3534     __ movdtox(F50, G4);
3535     __ mov(L1, from);
3536 
3537     __ BIND(L_128bit_transform);
3538     __ xor3(G1,G3,G3);
3539     __ xor3(G5,G4,G4);
3540     __ movxtod(G3,F56);
3541     __ movxtod(G4,F58);
3542     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
3543     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
3544 
3545     // TEN_EROUNDS
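         // 5 iterations x 2 rounds = the 10 rounds of AES-128; the second round of the
         // last iteration uses the aes_eround*_l 'last round' forms.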
3546     for ( int i = 0;  i <= 32; i += 8 ) {
3547       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
3548       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
3549       if (i != 32 ) {
3550         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
3551         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
3552       } else {
3553         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
3554         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
3555       }
3556     }
3557 
3558     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3559     __ andcc(to, 7, L1);
3560     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_128bit);
3561     __ delayed()->edge8n(to, G0, L2);
3562 
3563     // aligned case: store output into the destination array
3564     __ stf(FloatRegisterImpl::D, F60, to, 0);
3565     __ stf(FloatRegisterImpl::D, F62, to, 8);
3566     __ ba_short(L_check_loop_end_128bit);
3567 
3568     __ BIND(L_store_misaligned_output_128bit);
3569     __ add(to, 8, L3);
3570     __ mov(8, L4);
3571     __ sub(L4, L1, L4);
3572     __ alignaddr(L4, G0, L4);
3573     // save cipher text before circular right shift
3574     // as it needs to be stored as iv for next block (see code before next retl)
3575     __ movdtox(F60, L6);
3576     __ movdtox(F62, L7);
3577     __ faligndata(F60, F60, F60);
3578     __ faligndata(F62, F62, F62);
3579     __ mov(to, L5);
3580     __ and3(to, -8, to);
3581     __ and3(L3, -8, L3);
3582     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3583     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3584     __ add(to, 8, to);
3585     __ add(L3, 8, L3);
3586     __ orn(G0, L2, L2);
3587     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3588     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3589     __ mov(L5, to);
3590     __ movxtod(L6, F60);
3591     __ movxtod(L7, F62);
3592 
3593     __ BIND(L_check_loop_end_128bit);
3594     __ add(from, 16, from);
3595     __ add(to, 16, to);
3596     __ subcc(len_reg, 16, len_reg);
3597     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128);
3598     __ delayed()->nop();
3599     // re-init initial vector for next block, 8-byte alignment is guaranteed
3600     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
3601     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
3602     __ mov(L0, I0);
3603     __ ret();
3604     __ delayed()->restore();
3605 
3606     __ align(OptoLoopAlignment);
3607     __ BIND(L_cbcenc192);
3608     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3609     __ andcc(from, 7, G0);
3610     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_192bit);
3611     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
3612 
3613     // aligned case: load input into G3 and G4
3614     __ ldx(from,0,G3);
3615     __ ldx(from,8,G4);
3616     __ ba_short(L_192bit_transform);
3617 
3618     __ BIND(L_load_misaligned_input_192bit);
3619     // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption
3620     __ alignaddr(from, G0, from);
3621     __ ldf(FloatRegisterImpl::D, from, 0, F48);
3622     __ ldf(FloatRegisterImpl::D, from, 8, F50);
3623     __ ldf(FloatRegisterImpl::D, from, 16, F52);
3624     __ faligndata(F48, F50, F48);
3625     __ faligndata(F50, F52, F50);
3626     __ movdtox(F48, G3);
3627     __ movdtox(F50, G4);
3628     __ mov(L1, from);
3629 
3630     __ BIND(L_192bit_transform);
3631     __ xor3(G1,G3,G3);
3632     __ xor3(G5,G4,G4);
3633     __ movxtod(G3,F56);
3634     __ movxtod(G4,F58);
3635     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
3636     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
3637 
3638     // TWELVE_EROUNDS
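         // 6 iterations x 2 rounds = the 12 rounds of AES-192; the last iteration uses the *_l forms.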
3639     for ( int i = 0;  i <= 40; i += 8 ) {
3640       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
3641       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
3642       if (i != 40 ) {
3643         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
3644         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
3645       } else {
3646         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
3647         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
3648       }
3649     }
3650 
3651     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3652     __ andcc(to, 7, L1);
3653     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_192bit);
3654     __ delayed()->edge8n(to, G0, L2);
3655 
3656     // aligned case: store output into the destination array
3657     __ stf(FloatRegisterImpl::D, F60, to, 0);
3658     __ stf(FloatRegisterImpl::D, F62, to, 8);
3659     __ ba_short(L_check_loop_end_192bit);
3660 
3661     __ BIND(L_store_misaligned_output_192bit);
3662     __ add(to, 8, L3);
3663     __ mov(8, L4);
3664     __ sub(L4, L1, L4);
3665     __ alignaddr(L4, G0, L4);
3666     __ movdtox(F60, L6);
3667     __ movdtox(F62, L7);
3668     __ faligndata(F60, F60, F60);
3669     __ faligndata(F62, F62, F62);
3670     __ mov(to, L5);
3671     __ and3(to, -8, to);
3672     __ and3(L3, -8, L3);
3673     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3674     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3675     __ add(to, 8, to);
3676     __ add(L3, 8, L3);
3677     __ orn(G0, L2, L2);
3678     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3679     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3680     __ mov(L5, to);
3681     __ movxtod(L6, F60);
3682     __ movxtod(L7, F62);
3683 
3684     __ BIND(L_check_loop_end_192bit);
3685     __ add(from, 16, from);
3686     __ subcc(len_reg, 16, len_reg);
3687     __ add(to, 16, to);
3688     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192);
3689     __ delayed()->nop();
3690     // re-init initial vector for next block, 8-byte alignment is guaranteed
3691     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
3692     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
3693     __ mov(L0, I0);
3694     __ ret();
3695     __ delayed()->restore();
3696 
3697     __ align(OptoLoopAlignment);
3698     __ BIND(L_cbcenc256);
3699     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3700     __ andcc(from, 7, G0);
3701     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_256bit);
3702     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
3703 
3704     // aligned case: load input into G3 and G4
3705     __ ldx(from,0,G3);
3706     __ ldx(from,8,G4);
3707     __ ba_short(L_256bit_transform);
3708 
3709     __ BIND(L_load_misaligned_input_256bit);
3710     // cannot clobber F48, F50 and F52. F56, F58 can be used though
3711     __ alignaddr(from, G0, from);
3712     __ movdtox(F60, L2); // save F60 before overwriting
3713     __ ldf(FloatRegisterImpl::D, from, 0, F56);
3714     __ ldf(FloatRegisterImpl::D, from, 8, F58);
3715     __ ldf(FloatRegisterImpl::D, from, 16, F60);
3716     __ faligndata(F56, F58, F56);
3717     __ faligndata(F58, F60, F58);
3718     __ movdtox(F56, G3);
3719     __ movdtox(F58, G4);
3720     __ mov(L1, from);
3721     __ movxtod(L2, F60);
3722 
3723     __ BIND(L_256bit_transform);
3724     __ xor3(G1,G3,G3);
3725     __ xor3(G5,G4,G4);
3726     __ movxtod(G3,F56);
3727     __ movxtod(G4,F58);
3728     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
3729     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
3730 
3731     // FOURTEEN_EROUNDS
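         // 7 iterations x 2 rounds = the 14 rounds of AES-256; the last iteration uses the *_l forms.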
3732     for ( int i = 0;  i <= 48; i += 8 ) {
3733       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
3734       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
3735       if (i != 48 ) {
3736         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
3737         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
3738       } else {
3739         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
3740         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
3741       }
3742     }
3743 
3744     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3745     __ andcc(to, 7, L1);
3746     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_256bit);
3747     __ delayed()->edge8n(to, G0, L2);
3748 
3749     // aligned case: store output into the destination array
3750     __ stf(FloatRegisterImpl::D, F60, to, 0);
3751     __ stf(FloatRegisterImpl::D, F62, to, 8);
3752     __ ba_short(L_check_loop_end_256bit);
3753 
3754     __ BIND(L_store_misaligned_output_256bit);
3755     __ add(to, 8, L3);
3756     __ mov(8, L4);
3757     __ sub(L4, L1, L4);
3758     __ alignaddr(L4, G0, L4);
3759     __ movdtox(F60, L6);
3760     __ movdtox(F62, L7);
3761     __ faligndata(F60, F60, F60);
3762     __ faligndata(F62, F62, F62);
3763     __ mov(to, L5);
3764     __ and3(to, -8, to);
3765     __ and3(L3, -8, L3);
3766     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3767     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3768     __ add(to, 8, to);
3769     __ add(L3, 8, L3);
3770     __ orn(G0, L2, L2);
3771     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
3772     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
3773     __ mov(L5, to);
3774     __ movxtod(L6, F60);
3775     __ movxtod(L7, F62);
3776 
3777     __ BIND(L_check_loop_end_256bit);
3778     __ add(from, 16, from);
3779     __ subcc(len_reg, 16, len_reg);
3780     __ add(to, 16, to);
3781     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256);
3782     __ delayed()->nop();
3783     // re-init initial vector for next block, 8-byte alignment is guaranteed
3784     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
3785     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
3786     __ mov(L0, I0);
3787     __ ret();
3788     __ delayed()->restore();
3789 
3790     return start;
3791   }
3792 
3793   address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
3794     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
3795            "the following code assumes that first element of an int array is aligned to 8 bytes");
3796     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
3797            "the following code assumes that first element of a byte array is aligned to 8 bytes");
3798     __ align(CodeEntryAlignment);
3799     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
3800     Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start;
3801     Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256;
3802     Label L_load_misaligned_input_first_block, L_transform_first_block, L_load_misaligned_next2_blocks128, L_transform_next2_blocks128;
3803     Label L_load_misaligned_next2_blocks192, L_transform_next2_blocks192, L_load_misaligned_next2_blocks256, L_transform_next2_blocks256;
3804     Label L_store_misaligned_output_first_block, L_check_decrypt_end, L_store_misaligned_output_next2_blocks128;
3805     Label L_check_decrypt_loop_end128, L_store_misaligned_output_next2_blocks192, L_check_decrypt_loop_end192;
3806     Label L_store_misaligned_output_next2_blocks256, L_check_decrypt_loop_end256;
3807     address start = __ pc();
3808     Register from = I0; // source byte array
3809     Register to = I1;   // destination byte array
3810     Register key = I2;  // expanded key array
3811     Register rvec = I3; // init vector
3812     const Register len_reg = I4; // cipher length
3813     const Register original_key = I5;  // original key array only required during decryption
3814     const Register keylen = L6;  // reg for storing expanded key array length
3815 
3816     __ save_frame(0); // args are read from the I* registers since we save the frame at the beginning
3817     // save cipher len to return in the end
3818     __ mov(len_reg, L7);
3819 
3820     // load original key from SunJCE expanded decryption key
3821     // Since we load the original key buffer starting at its first element, 8-byte alignment is guaranteed
3822     for ( int i = 0;  i <= 3; i++ ) {
3823       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3824     }
3825 
3826     // load initial vector, 8-byte alignment is guaranteed
3827     __ ldx(rvec,0,L0);
3828     __ ldx(rvec,8,L1);
3829 
3830     // read expanded key array length
3831     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
3832 
3833     // 256-bit original key size
3834     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
3835 
3836     // 192-bit original key size
3837     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
3838 
3839     // 128-bit original key size
3840     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3841     for ( int i = 0;  i <= 36; i += 4 ) {
3842       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
3843       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
3844     }
3845 
3846     // load expanded key[last-1] and key[last] elements
3847     __ movdtox(F40,L2);
3848     __ movdtox(F42,L3);
3849 
3850     __ and3(len_reg, 16, L4);
3851     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks128);
3852     __ nop();
3853 
3854     __ ba_short(L_dec_first_block_start);
3855 
3856     __ BIND(L_expand192bit);
3857     // load rest of the 192-bit key
3858     __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
3859     __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
3860 
3861     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3862     for ( int i = 0;  i <= 36; i += 6 ) {
3863       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
3864       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
3865       __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
3866     }
3867     __ aes_kexpand1(F42, F46, 7, F48);
3868     __ aes_kexpand2(F44, F48, F50);
3869 
3870     // load expanded key[last-1] and key[last] elements
3871     __ movdtox(F48,L2);
3872     __ movdtox(F50,L3);
3873 
3874     __ and3(len_reg, 16, L4);
3875     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks192);
3876     __ nop();
3877 
3878     __ ba_short(L_dec_first_block_start);
3879 
3880     __ BIND(L_expand256bit);
3881     // load rest of the 256-bit key
3882     for ( int i = 4;  i <= 7; i++ ) {
3883       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
3884     }
3885 
3886     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
3887     for ( int i = 0;  i <= 40; i += 8 ) {
3888       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
3889       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
3890       __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
3891       __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
3892     }
3893     __ aes_kexpand1(F48, F54, 6, F56);
3894     __ aes_kexpand2(F50, F56, F58);
3895 
3896     // load expanded key[last-1] and key[last] elements
3897     __ movdtox(F56,L2);
3898     __ movdtox(F58,L3);
3899 
3900     __ and3(len_reg, 16, L4);
3901     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks256);
3902 
3903     __ BIND(L_dec_first_block_start);
3904     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
3905     __ andcc(from, 7, G0);
3906     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_first_block);
3907     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
3908 
3909     // aligned case: load input into L4 and L5
3910     __ ldx(from,0,L4);
3911     __ ldx(from,8,L5);
3912     __ ba_short(L_transform_first_block);
3913 
3914     __ BIND(L_load_misaligned_input_first_block);
3915     __ alignaddr(from, G0, from);
3916     // F58, F60, F62 can be clobbered
3917     __ ldf(FloatRegisterImpl::D, from, 0, F58);
3918     __ ldf(FloatRegisterImpl::D, from, 8, F60);
3919     __ ldf(FloatRegisterImpl::D, from, 16, F62);
3920     __ faligndata(F58, F60, F58);
3921     __ faligndata(F60, F62, F60);
3922     __ movdtox(F58, L4);
3923     __ movdtox(F60, L5);
3924     __ mov(G1, from);
3925 
3926     __ BIND(L_transform_first_block);
3927     __ xor3(L2,L4,G1);
3928     __ movxtod(G1,F60);
3929     __ xor3(L3,L5,G1);
3930     __ movxtod(G1,F62);
3931 
3932     // 128-bit original key size
3933     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128);
3934 
3935     // 192-bit original key size
3936     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192);
3937 
3938     __ aes_dround23(F54, F60, F62, F58);
3939     __ aes_dround01(F52, F60, F62, F56);
3940     __ aes_dround23(F50, F56, F58, F62);
3941     __ aes_dround01(F48, F56, F58, F60);
3942 
3943     __ BIND(L_dec_first_block192);
3944     __ aes_dround23(F46, F60, F62, F58);
3945     __ aes_dround01(F44, F60, F62, F56);
3946     __ aes_dround23(F42, F56, F58, F62);
3947     __ aes_dround01(F40, F56, F58, F60);
3948 
3949     __ BIND(L_dec_first_block128);
3950     for ( int i = 38;  i >= 6; i -= 8 ) {
3951       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
3952       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
3953       if ( i != 6) {
3954         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
3955         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
3956       } else {
3957         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
3958         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
3959       }
3960     }
3961 
3962     __ movxtod(L0,F56);
3963     __ movxtod(L1,F58);
3964     __ mov(L4,L0);
3965     __ mov(L5,L1);
3966     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
3967     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
3968 
3969     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
3970     __ andcc(to, 7, G1);
3971     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_first_block);
3972     __ delayed()->edge8n(to, G0, G2);
3973 
3974     // aligned case: store output into the destination array
3975     __ stf(FloatRegisterImpl::D, F60, to, 0);
3976     __ stf(FloatRegisterImpl::D, F62, to, 8);
3977     __ ba_short(L_check_decrypt_end);
3978 
3979     __ BIND(L_store_misaligned_output_first_block);
3980     __ add(to, 8, G3);
3981     __ mov(8, G4);
3982     __ sub(G4, G1, G4);
3983     __ alignaddr(G4, G0, G4);
3984     __ faligndata(F60, F60, F60);
3985     __ faligndata(F62, F62, F62);
3986     __ mov(to, G1);
3987     __ and3(to, -8, to);
3988     __ and3(G3, -8, G3);
3989     __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY);
3990     __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY);
3991     __ add(to, 8, to);
3992     __ add(G3, 8, G3);
3993     __ orn(G0, G2, G2);
3994     __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY);
3995     __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY);
3996     __ mov(G1, to);
3997 
3998     __ BIND(L_check_decrypt_end);
3999     __ add(from, 16, from);
4000     __ add(to, 16, to);
4001     __ subcc(len_reg, 16, len_reg);
4002     __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end);
4003     __ delayed()->nop();
4004 
4005     // 256-bit original key size
4006     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256);
4007 
4008     // 192-bit original key size
4009     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192);
4010 
4011     __ align(OptoLoopAlignment);
4012     __ BIND(L_dec_next2_blocks128);
4013     __ nop();
4014 
4015     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
4016     __ andcc(from, 7, G0);
4017     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks128);
4018     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
4019 
4020     // aligned case: load input into G4, G5, L4 and L5
4021     __ ldx(from,0,G4);
4022     __ ldx(from,8,G5);
4023     __ ldx(from,16,L4);
4024     __ ldx(from,24,L5);
4025     __ ba_short(L_transform_next2_blocks128);
4026 
4027     __ BIND(L_load_misaligned_next2_blocks128);
4028     __ alignaddr(from, G0, from);
4029     // F40, F42, F58, F60, F62 can be clobbered
4030     __ ldf(FloatRegisterImpl::D, from, 0, F40);
4031     __ ldf(FloatRegisterImpl::D, from, 8, F42);
4032     __ ldf(FloatRegisterImpl::D, from, 16, F60);
4033     __ ldf(FloatRegisterImpl::D, from, 24, F62);
4034     __ ldf(FloatRegisterImpl::D, from, 32, F58);
4035     __ faligndata(F40, F42, F40);
4036     __ faligndata(F42, F60, F42);
4037     __ faligndata(F60, F62, F60);
4038     __ faligndata(F62, F58, F62);
4039     __ movdtox(F40, G4);
4040     __ movdtox(F42, G5);
4041     __ movdtox(F60, L4);
4042     __ movdtox(F62, L5);
4043     __ mov(G1, from);
4044 
4045     __ BIND(L_transform_next2_blocks128);
4046     // F40:F42 used for first 16-bytes
4047     __ xor3(L2,G4,G1);
4048     __ movxtod(G1,F40);
4049     __ xor3(L3,G5,G1);
4050     __ movxtod(G1,F42);
4051 
4052     // F60:F62 used for next 16-bytes
4053     __ xor3(L2,L4,G1);
4054     __ movxtod(G1,F60);
4055     __ xor3(L3,L5,G1);
4056     __ movxtod(G1,F62);
4057 
4058     for ( int i = 38;  i >= 6; i -= 8 ) {
4059       __ aes_dround23(as_FloatRegister(i), F40, F42, F44);
4060       __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46);
4061       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
4062       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
4063       if (i != 6 ) {
4064         __ aes_dround23(as_FloatRegister(i-4), F46, F44, F42);
4065         __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40);
4066         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
4067         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
4068       } else {
4069         __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42);
4070         __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40);
4071         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
4072         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
4073       }
4074     }
4075 
4076     __ movxtod(L0,F46);
4077     __ movxtod(L1,F44);
4078     __ fxor(FloatRegisterImpl::D, F46, F40, F40);
4079     __ fxor(FloatRegisterImpl::D, F44, F42, F42);
4080 
4081     __ movxtod(G4,F56);
4082     __ movxtod(G5,F58);
4083     __ mov(L4,L0);
4084     __ mov(L5,L1);
4085     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
4086     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
4087 
4088     // For a misaligned store of the 32 bytes of result we can:
4089     // circular right-shift all 4 FP registers so that the 'head' and 'tail'
4090     // parts that must be stored starting at the misaligned address end up together
4091     // in one FP register; the other 3 FP registers can then be stored with regular
4092     // 8-byte stores, and the 'head' and 'tail' parts are written with the edge-mask + partial-store mechanism
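         // Sketch of the misaligned branch below (n = to & 7, 0 < n < 8): after the rotation,
         //   F56 = [ last n bytes of F40 | first 8-n bytes of F42 ]  (F42 and F60 likewise)
         //   F40 = [ last n bytes of F62 | first 8-n bytes of F40 ]
         // so the partial store of F40 at the aligned 'to' emits the first 8-n output bytes,
         // the three full 8-byte stores emit the middle 24 bytes, and the partial store of
         // F40 at 'to' + 32 (with the inverted edge mask) emits the final n bytes.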
4093 
4094     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
4095     __ andcc(to, 7, G1);
4096     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks128);
4097     __ delayed()->edge8n(to, G0, G2);
4098 
4099     // aligned case: store output into the destination array
4100     __ stf(FloatRegisterImpl::D, F40, to, 0);
4101     __ stf(FloatRegisterImpl::D, F42, to, 8);
4102     __ stf(FloatRegisterImpl::D, F60, to, 16);
4103     __ stf(FloatRegisterImpl::D, F62, to, 24);
4104     __ ba_short(L_check_decrypt_loop_end128);
4105 
4106     __ BIND(L_store_misaligned_output_next2_blocks128);
4107     __ mov(8, G4);
4108     __ sub(G4, G1, G4);
4109     __ alignaddr(G4, G0, G4);
4110     __ faligndata(F40, F42, F56); // F56 can be clobbered
4111     __ faligndata(F42, F60, F42);
4112     __ faligndata(F60, F62, F60);
4113     __ faligndata(F62, F40, F40);
4114     __ mov(to, G1);
4115     __ and3(to, -8, to);
4116     __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY);
4117     __ stf(FloatRegisterImpl::D, F56, to, 8);
4118     __ stf(FloatRegisterImpl::D, F42, to, 16);
4119     __ stf(FloatRegisterImpl::D, F60, to, 24);
4120     __ add(to, 32, to);
4121     __ orn(G0, G2, G2);
4122     __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY);
4123     __ mov(G1, to);
4124 
4125     __ BIND(L_check_decrypt_loop_end128);
4126     __ add(from, 32, from);
4127     __ add(to, 32, to);
4128     __ subcc(len_reg, 32, len_reg);
4129     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128);
4130     __ delayed()->nop();
4131     __ ba_short(L_cbcdec_end);
4132 
4133     __ align(OptoLoopAlignment);
4134     __ BIND(L_dec_next2_blocks192);
4135     __ nop();
4136 
4137     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
4138     __ andcc(from, 7, G0);
4139     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks192);
4140     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
4141 
4142     // aligned case: load input into G4, G5, L4 and L5
4143     __ ldx(from,0,G4);
4144     __ ldx(from,8,G5);
4145     __ ldx(from,16,L4);
4146     __ ldx(from,24,L5);
4147     __ ba_short(L_transform_next2_blocks192);
4148 
4149     __ BIND(L_load_misaligned_next2_blocks192);
4150     __ alignaddr(from, G0, from);
4151     // F48, F50, F52, F60, F62 can be clobbered
4152     __ ldf(FloatRegisterImpl::D, from, 0, F48);
4153     __ ldf(FloatRegisterImpl::D, from, 8, F50);
4154     __ ldf(FloatRegisterImpl::D, from, 16, F60);
4155     __ ldf(FloatRegisterImpl::D, from, 24, F62);
4156     __ ldf(FloatRegisterImpl::D, from, 32, F52);
4157     __ faligndata(F48, F50, F48);
4158     __ faligndata(F50, F60, F50);
4159     __ faligndata(F60, F62, F60);
4160     __ faligndata(F62, F52, F62);
4161     __ movdtox(F48, G4);
4162     __ movdtox(F50, G5);
4163     __ movdtox(F60, L4);
4164     __ movdtox(F62, L5);
4165     __ mov(G1, from);
4166 
4167     __ BIND(L_transform_next2_blocks192);
4168     // F48:F50 used for first 16-bytes
4169     __ xor3(L2,G4,G1);
4170     __ movxtod(G1,F48);
4171     __ xor3(L3,G5,G1);
4172     __ movxtod(G1,F50);
4173 
4174     // F60:F62 used for next 16-bytes
4175     __ xor3(L2,L4,G1);
4176     __ movxtod(G1,F60);
4177     __ xor3(L3,L5,G1);
4178     __ movxtod(G1,F62);
4179 
4180     for ( int i = 46;  i >= 6; i -= 8 ) {
4181       __ aes_dround23(as_FloatRegister(i), F48, F50, F52);
4182       __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54);
4183       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
4184       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
4185       if (i != 6 ) {
4186         __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50);
4187         __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48);
4188         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
4189         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
4190       } else {
4191         __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50);
4192         __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48);
4193         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
4194         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
4195       }
4196     }
4197 
4198     __ movxtod(L0,F54);
4199     __ movxtod(L1,F52);
4200     __ fxor(FloatRegisterImpl::D, F54, F48, F48);
4201     __ fxor(FloatRegisterImpl::D, F52, F50, F50);
4202 
4203     __ movxtod(G4,F56);
4204     __ movxtod(G5,F58);
4205     __ mov(L4,L0);
4206     __ mov(L5,L1);
4207     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
4208     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
4209 
4210     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
4211     __ andcc(to, 7, G1);
4212     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks192);
4213     __ delayed()->edge8n(to, G0, G2);
4214 
4215     // aligned case: store output into the destination array
4216     __ stf(FloatRegisterImpl::D, F48, to, 0);
4217     __ stf(FloatRegisterImpl::D, F50, to, 8);
4218     __ stf(FloatRegisterImpl::D, F60, to, 16);
4219     __ stf(FloatRegisterImpl::D, F62, to, 24);
4220     __ ba_short(L_check_decrypt_loop_end192);
4221 
4222     __ BIND(L_store_misaligned_output_next2_blocks192);
4223     __ mov(8, G4);
4224     __ sub(G4, G1, G4);
4225     __ alignaddr(G4, G0, G4);
4226     __ faligndata(F48, F50, F56); // F56 can be clobbered
4227     __ faligndata(F50, F60, F50);
4228     __ faligndata(F60, F62, F60);
4229     __ faligndata(F62, F48, F48);
4230     __ mov(to, G1);
4231     __ and3(to, -8, to);
4232     __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY);
4233     __ stf(FloatRegisterImpl::D, F56, to, 8);
4234     __ stf(FloatRegisterImpl::D, F50, to, 16);
4235     __ stf(FloatRegisterImpl::D, F60, to, 24);
4236     __ add(to, 32, to);
4237     __ orn(G0, G2, G2);
4238     __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY);
4239     __ mov(G1, to);
4240 
4241     __ BIND(L_check_decrypt_loop_end192);
4242     __ add(from, 32, from);
4243     __ add(to, 32, to);
4244     __ subcc(len_reg, 32, len_reg);
4245     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192);
4246     __ delayed()->nop();
4247     __ ba_short(L_cbcdec_end);
4248 
4249     __ align(OptoLoopAlignment);
4250     __ BIND(L_dec_next2_blocks256);
4251     __ nop();
4252 
4253     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
4254     __ andcc(from, 7, G0);
4255     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks256);
4256     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
4257 
4258     // aligned case: load input into G4, G5, L4 and L5
4259     __ ldx(from,0,G4);
4260     __ ldx(from,8,G5);
4261     __ ldx(from,16,L4);
4262     __ ldx(from,24,L5);
4263     __ ba_short(L_transform_next2_blocks256);
4264 
4265     __ BIND(L_load_misaligned_next2_blocks256);
4266     __ alignaddr(from, G0, from);
4267     // F0, F2, F4, F60, F62 can be clobbered
4268     __ ldf(FloatRegisterImpl::D, from, 0, F0);
4269     __ ldf(FloatRegisterImpl::D, from, 8, F2);
4270     __ ldf(FloatRegisterImpl::D, from, 16, F60);
4271     __ ldf(FloatRegisterImpl::D, from, 24, F62);
4272     __ ldf(FloatRegisterImpl::D, from, 32, F4);
4273     __ faligndata(F0, F2, F0);
4274     __ faligndata(F2, F60, F2);
4275     __ faligndata(F60, F62, F60);
4276     __ faligndata(F62, F4, F62);
4277     __ movdtox(F0, G4);
4278     __ movdtox(F2, G5);
4279     __ movdtox(F60, L4);
4280     __ movdtox(F62, L5);
4281     __ mov(G1, from);
4282 
4283     __ BIND(L_transform_next2_blocks256);
4284     // F0:F2 used for first 16-bytes
4285     __ xor3(L2,G4,G1);
4286     __ movxtod(G1,F0);
4287     __ xor3(L3,G5,G1);
4288     __ movxtod(G1,F2);
4289 
4290     // F60:F62 used for next 16-bytes
4291     __ xor3(L2,L4,G1);
4292     __ movxtod(G1,F60);
4293     __ xor3(L3,L5,G1);
4294     __ movxtod(G1,F62);
4295 
4296     __ aes_dround23(F54, F0, F2, F4);
4297     __ aes_dround01(F52, F0, F2, F6);
4298     __ aes_dround23(F54, F60, F62, F58);
4299     __ aes_dround01(F52, F60, F62, F56);
4300     __ aes_dround23(F50, F6, F4, F2);
4301     __ aes_dround01(F48, F6, F4, F0);
4302     __ aes_dround23(F50, F56, F58, F62);
4303     __ aes_dround01(F48, F56, F58, F60);
4304     // save F48:F54 in temp registers
4305     __ movdtox(F54,G2);
4306     __ movdtox(F52,G3);
4307     __ movdtox(F50,G6);
4308     __ movdtox(F48,G1);
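         // For 256-bit keys the full expanded key does not fit in F0-F54 alongside the
         // working registers, so the last rounds below reload F48:F54 from the original
         // key bytes (the first words of the key schedule) and then restore the saved
         // values from G1/G2/G3/G6.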
4309     for ( int i = 46;  i >= 14; i -= 8 ) {
4310       __ aes_dround23(as_FloatRegister(i), F0, F2, F4);
4311       __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6);
4312       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
4313       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
4314       __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2);
4315       __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0);
4316       __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
4317       __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
4318     }
4319     // load F48:F54 with the first 32 bytes of the original key (the values the expanded key holds in F0:F6)
4320     __ ldf(FloatRegisterImpl::D, original_key, 0, F48);
4321     __ ldf(FloatRegisterImpl::D, original_key, 8, F50);
4322     __ ldf(FloatRegisterImpl::D, original_key, 16, F52);
4323     __ ldf(FloatRegisterImpl::D, original_key, 24, F54);
4324     __ aes_dround23(F54, F0, F2, F4);
4325     __ aes_dround01(F52, F0, F2, F6);
4326     __ aes_dround23(F54, F60, F62, F58);
4327     __ aes_dround01(F52, F60, F62, F56);
4328     __ aes_dround23_l(F50, F6, F4, F2);
4329     __ aes_dround01_l(F48, F6, F4, F0);
4330     __ aes_dround23_l(F50, F56, F58, F62);
4331     __ aes_dround01_l(F48, F56, F58, F60);
4332     // re-init F48:F54 with their original values
4333     __ movxtod(G2,F54);
4334     __ movxtod(G3,F52);
4335     __ movxtod(G6,F50);
4336     __ movxtod(G1,F48);
4337 
4338     __ movxtod(L0,F6);
4339     __ movxtod(L1,F4);
4340     __ fxor(FloatRegisterImpl::D, F6, F0, F0);
4341     __ fxor(FloatRegisterImpl::D, F4, F2, F2);
4342 
4343     __ movxtod(G4,F56);
4344     __ movxtod(G5,F58);
4345     __ mov(L4,L0);
4346     __ mov(L5,L1);
4347     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
4348     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
4349 
4350     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
4351     __ andcc(to, 7, G1);
4352     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks256);
4353     __ delayed()->edge8n(to, G0, G2);
4354 
4355     // aligned case: store output into the destination array
4356     __ stf(FloatRegisterImpl::D, F0, to, 0);
4357     __ stf(FloatRegisterImpl::D, F2, to, 8);
4358     __ stf(FloatRegisterImpl::D, F60, to, 16);
4359     __ stf(FloatRegisterImpl::D, F62, to, 24);
4360     __ ba_short(L_check_decrypt_loop_end256);
4361 
4362     __ BIND(L_store_misaligned_output_next2_blocks256);
4363     __ mov(8, G4);
4364     __ sub(G4, G1, G4);
4365     __ alignaddr(G4, G0, G4);
4366     __ faligndata(F0, F2, F56); // F56 can be clobbered
4367     __ faligndata(F2, F60, F2);
4368     __ faligndata(F60, F62, F60);
4369     __ faligndata(F62, F0, F0);
4370     __ mov(to, G1);
4371     __ and3(to, -8, to);
4372     __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY);
4373     __ stf(FloatRegisterImpl::D, F56, to, 8);
4374     __ stf(FloatRegisterImpl::D, F2, to, 16);
4375     __ stf(FloatRegisterImpl::D, F60, to, 24);
4376     __ add(to, 32, to);
4377     __ orn(G0, G2, G2);
4378     __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY);
4379     __ mov(G1, to);
4380 
4381     __ BIND(L_check_decrypt_loop_end256);
4382     __ add(from, 32, from);
4383     __ add(to, 32, to);
4384     __ subcc(len_reg, 32, len_reg);
4385     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256);
4386     __ delayed()->nop();
4387 
4388     __ BIND(L_cbcdec_end);
4389     // re-init initial vector for next block; 8-byte alignment is guaranteed
4390     __ stx(L0, rvec, 0);
4391     __ stx(L1, rvec, 8);
4392     __ mov(L7, I0);
4393     __ ret();
4394     __ delayed()->restore();
4395 
4396     return start;
4397   }
4398 
4399   address generate_sha1_implCompress(bool multi_block, const char *name) {
4400     __ align(CodeEntryAlignment);
4401     StubCodeMark mark(this, "StubRoutines", name);
4402     address start = __ pc();
4403 
4404     Label L_sha1_loop, L_sha1_unaligned_input, L_sha1_unaligned_input_loop;
4405     int i;
4406 
4407     Register buf   = O0; // byte[] source+offset
4408     Register state = O1; // int[]  SHA.state
4409     Register ofs   = O2; // int    offset
4410     Register limit = O3; // int    limit
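         // With multi_block == true this is the implCompressMB entry: it keeps consuming
         // 64-byte blocks while ofs <= limit and returns the updated offset in O0;
         // otherwise exactly one block is processed.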
4411 
4412     // load state into F0-F4
4413     for (i = 0; i < 5; i++) {
4414       __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i));
4415     }
4416 
4417     __ andcc(buf, 7, G0);
4418     __ br(Assembler::notZero, false, Assembler::pn, L_sha1_unaligned_input);
4419     __ delayed()->nop();
4420 
4421     __ BIND(L_sha1_loop);
4422     // load buf into F8-F22
4423     for (i = 0; i < 8; i++) {
4424       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
4425     }
4426     __ sha1();
4427     if (multi_block) {
4428       __ add(ofs, 64, ofs);
4429       __ add(buf, 64, buf);
4430       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_loop);
4431       __ mov(ofs, O0); // to be returned
4432     }
4433 
4434     // store F0-F4 into state and return
4435     for (i = 0; i < 4; i++) {
4436       __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
4437     }
4438     __ retl();
4439     __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10);
4440 
4441     __ BIND(L_sha1_unaligned_input);
4442     __ alignaddr(buf, G0, buf);
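         // alignaddr has recorded the original (buf & 7) in the GSR align field and
         // rounded buf down to an 8-byte boundary; each pass below loads 9 aligned
         // doublewords into F8-F24 and uses faligndata to extract the 64 unaligned
         // input bytes into F8-F22.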
4443 
4444     __ BIND(L_sha1_unaligned_input_loop);
4445     // load buf into F8-F22
4446     for (i = 0; i < 9; i++) {
4447       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
4448     }
4449     for (i = 0; i < 8; i++) {
4450       __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8));
4451     }
4452     __ sha1();
4453     if (multi_block) {
4454       __ add(ofs, 64, ofs);
4455       __ add(buf, 64, buf);
4456       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_unaligned_input_loop);
4457       __ mov(ofs, O0); // to be returned
4458     }
4459 
4460     // store F0-F4 into state and return
4461     for (i = 0; i < 4; i++) {
4462       __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
4463     }
4464     __ retl();
4465     __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10);
4466 
4467     return start;
4468   }
4469 
4470   address generate_sha256_implCompress(bool multi_block, const char *name) {
4471     __ align(CodeEntryAlignment);
4472     StubCodeMark mark(this, "StubRoutines", name);
4473     address start = __ pc();
4474 
4475     Label L_sha256_loop, L_sha256_unaligned_input, L_sha256_unaligned_input_loop;
4476     int i;
4477 
4478     Register buf   = O0; // byte[] source+offset
4479     Register state = O1; // int[]  SHA2.state
4480     Register ofs   = O2; // int    offset
4481     Register limit = O3; // int    limit
4482 
4483     // load state into F0-F7
4484     for (i = 0; i < 8; i++) {
4485       __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i));
4486     }
4487 
4488     __ andcc(buf, 7, G0);
4489     __ br(Assembler::notZero, false, Assembler::pn, L_sha256_unaligned_input);
4490     __ delayed()->nop();
4491 
4492     __ BIND(L_sha256_loop);
4493     // load buf into F8-F22
4494     for (i = 0; i < 8; i++) {
4495       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
4496     }
4497     __ sha256();
4498     if (multi_block) {
4499       __ add(ofs, 64, ofs);
4500       __ add(buf, 64, buf);
4501       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_loop);
4502       __ mov(ofs, O0); // to be returned
4503     }
4504 
4505     // store F0-F7 into state and return
4506     for (i = 0; i < 7; i++) {
4507       __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
4508     }
4509     __ retl();
4510     __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c);
4511 
4512     __ BIND(L_sha256_unaligned_input);
4513     __ alignaddr(buf, G0, buf);
4514 
4515     __ BIND(L_sha256_unaligned_input_loop);
4516     // load buf into F8-F22
4517     for (i = 0; i < 9; i++) {
4518       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
4519     }
4520     for (i = 0; i < 8; i++) {
4521       __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8));
4522     }
4523     __ sha256();
4524     if (multi_block) {
4525       __ add(ofs, 64, ofs);
4526       __ add(buf, 64, buf);
4527       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_unaligned_input_loop);
4528       __ mov(ofs, O0); // to be returned
4529     }
4530 
4531     // store F0-F7 into state and return
4532     for (i = 0; i < 7; i++) {
4533       __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
4534     }
4535     __ retl();
4536     __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c);
4537 
4538     return start;
4539   }
4540 
4541   address generate_sha512_implCompress(bool multi_block, const char *name) {
4542     __ align(CodeEntryAlignment);
4543     StubCodeMark mark(this, "StubRoutines", name);
4544     address start = __ pc();
4545 
4546     Label L_sha512_loop, L_sha512_unaligned_input, L_sha512_unaligned_input_loop;
4547     int i;
4548 
4549     Register buf   = O0; // byte[] source+offset
4550     Register state = O1; // long[] SHA5.state
4551     Register ofs   = O2; // int    offset
4552     Register limit = O3; // int    limit
4553 
4554     // load state into F0-F14
4555     for (i = 0; i < 8; i++) {
4556       __ ldf(FloatRegisterImpl::D, state, i*8, as_FloatRegister(i*2));
4557     }
4558 
4559     __ andcc(buf, 7, G0);
4560     __ br(Assembler::notZero, false, Assembler::pn, L_sha512_unaligned_input);
4561     __ delayed()->nop();
4562 
4563     __ BIND(L_sha512_loop);
4564     // load buf into F16-F46
4565     for (i = 0; i < 16; i++) {
4566       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16));
4567     }
4568     __ sha512();
4569     if (multi_block) {
4570       __ add(ofs, 128, ofs);
4571       __ add(buf, 128, buf);
4572       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_loop);
4573       __ mov(ofs, O0); // to be returned
4574     }
4575 
4576     // store F0-F14 into state and return
4577     for (i = 0; i < 7; i++) {
4578       __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8);
4579     }
4580     __ retl();
4581     __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38);
4582 
4583     __ BIND(L_sha512_unaligned_input);
4584     __ alignaddr(buf, G0, buf);
4585 
4586     __ BIND(L_sha512_unaligned_input_loop);
4587     // load buf into F16-F46
4588     for (i = 0; i < 17; i++) {
4589       __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16));
4590     }
4591     for (i = 0; i < 16; i++) {
4592       __ faligndata(as_FloatRegister(i*2 + 16), as_FloatRegister(i*2 + 18), as_FloatRegister(i*2 + 16));
4593     }
4594     __ sha512();
4595     if (multi_block) {
4596       __ add(ofs, 128, ofs);
4597       __ add(buf, 128, buf);
4598       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_unaligned_input_loop);
4599       __ mov(ofs, O0); // to be returned
4600     }
4601 
4602     // store F0-F14 into state and return
4603     for (i = 0; i < 7; i++) {
4604       __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8);
4605     }
4606     __ retl();
4607     __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38);
4608 
4609     return start;
4610   }
4611 
4612   /* Single and multi-block ghash operations */
4613   address generate_ghash_processBlocks() {
4614       __ align(CodeEntryAlignment);
4615       Label L_ghash_loop, L_aligned, L_main;
4616       StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
4617       address start = __ pc();
4618 
4619       Register state = I0;
4620       Register subkeyH = I1;
4621       Register data = I2;
4622       Register len = I3;
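           // GHASH, in outline: for each 16-byte block, state <- (state ^ block) * subkeyH
           // in GF(2^128), using GCM's bit-reflected representation of the field
           // (reduction polynomial x^128 + x^7 + x^2 + x + 1).  The 128x128-bit
           // carry-less product is built from four 64x64 xmulx/xmulxhi products and
           // then folded back modulo the polynomial.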
4623 
4624       __ save_frame(0);
4625 
4626       __ ldx(state, 0, O0);
4627       __ ldx(state, 8, O1);
4628 
4629       // Loop label for multiblock operations
4630       __ BIND(L_ghash_loop);
4631 
4632       // Check if 'data' is unaligned
4633       __ andcc(data, 7, G1);
4634       __ br(Assembler::zero, false, Assembler::pt, L_aligned);
4635       __ delayed()->nop();
4636 
4637       Register left_shift = L1;
4638       Register right_shift = L2;
4639       Register data_ptr = L3;
4640 
4641       // Get left and right shift values in bits
4642       __ sll(G1, LogBitsPerByte, left_shift);
4643       __ mov(64, right_shift);
4644       __ sub(right_shift, left_shift, right_shift);
4645 
4646       // Align to read 'data'
4647       __ sub(data, G1, data_ptr);
4648 
4649       // Load first 8 bytes of 'data'
4650       __ ldx(data_ptr, 0, O4);
4651       __ sllx(O4, left_shift, O4);
4652       __ ldx(data_ptr, 8, O5);
4653       __ srlx(O5, right_shift, G4);
4654       __ bset(G4, O4);
4655 
4656       // Load second 8 bytes of 'data'
4657       __ sllx(O5, left_shift, O5);
4658       __ ldx(data_ptr, 16, G4);
4659       __ srlx(G4, right_shift, G4);
4660       __ ba(L_main);
4661       __ delayed()->bset(G4, O5);
4662 
4663       // If 'data' is aligned, load normally
4664       __ BIND(L_aligned);
4665       __ ldx(data, 0, O4);
4666       __ ldx(data, 8, O5);
4667 
4668       __ BIND(L_main);
4669       __ ldx(subkeyH, 0, O2);
4670       __ ldx(subkeyH, 8, O3);
4671 
4672       __ xor3(O0, O4, O0);
4673       __ xor3(O1, O5, O1);
4674 
4675       __ xmulxhi(O0, O3, G3);
4676       __ xmulx(O0, O2, O5);
4677       __ xmulxhi(O1, O2, G4);
4678       __ xmulxhi(O1, O3, G5);
4679       __ xmulx(O0, O3, G1);
4680       __ xmulx(O1, O3, G2);
4681       __ xmulx(O1, O2, O3);
4682       __ xmulxhi(O0, O2, O4);
4683 
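           // 0xE1 << 56 encodes the GCM reduction polynomial in its bit-reflected form;
           // it is used below to fold the upper half of the product back into the
           // lower 128 bits.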
4684       __ mov(0xE1, O0);
4685       __ sllx(O0, 56, O0);
4686 
4687       __ xor3(O5, G3, O5);
4688       __ xor3(O5, G4, O5);
4689       __ xor3(G5, G1, G1);
4690       __ xor3(G1, O3, G1);
4691       __ srlx(G2, 63, O1);
4692       __ srlx(G1, 63, G3);
4693       __ sllx(G2, 63, O3);
4694       __ sllx(G2, 58, O2);
4695       __ xor3(O3, O2, O2);
4696 
4697       __ sllx(G1, 1, G1);
4698       __ or3(G1, O1, G1);
4699 
4700       __ xor3(G1, O2, G1);
4701 
4702       __ sllx(G2, 1, G2);
4703 
4704       __ xmulxhi(G1, O0, O1);
4705       __ xmulx(G1, O0, O2);
4706       __ xmulxhi(G2, O0, O3);
4707       __ xmulx(G2, O0, G1);
4708 
4709       __ xor3(O4, O1, O4);
4710       __ xor3(O5, O2, O5);
4711       __ xor3(O5, O3, O5);
4712 
4713       __ sllx(O4, 1, O2);
4714       __ srlx(O5, 63, O3);
4715 
4716       __ or3(O2, O3, O0);
4717 
4718       __ sllx(O5, 1, O1);
4719       __ srlx(G1, 63, O2);
4720       __ or3(O1, O2, O1);
4721       __ xor3(O1, G3, O1);
4722 
4723       __ deccc(len);
4724       __ br(Assembler::notZero, true, Assembler::pt, L_ghash_loop);
4725       __ delayed()->add(data, 16, data);
4726 
4727       __ stx(O0, I0, 0);
4728       __ stx(O1, I0, 8);
4729 
4730       __ ret();
4731       __ delayed()->restore();
4732 
4733       return start;
4734   }
4735 
4736   /**
4737    *  Arguments:
4738    *
4739    * Inputs:
4740    *   O0   - int   crc
4741    *   O1   - byte* buf
4742    *   O2   - int   len
4743    *   O3   - int*  table
4744    *
4745    * Output:
4746    *   O0   - int crc result
4747    */
4748   address generate_updateBytesCRC32C() {
4749     assert(UseCRC32CIntrinsics, "need CRC32C instruction");
4750 
4751     __ align(CodeEntryAlignment);
4752     StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
4753     address start = __ pc();
4754 
4755     const Register crc   = O0;  // crc
4756     const Register buf   = O1;  // source java byte array address
4757     const Register len   = O2;  // number of bytes
4758     const Register table = O3;  // byteTable
4759 
4760     __ kernel_crc32c(crc, buf, len, table);
4761 
4762     __ retl();
4763     __ delayed()->nop();
4764 
4765     return start;
4766   }
4767 
4768 #define ADLER32_NUM_TEMPS 16
4769 
4770   /**
4771    *  Arguments:
4772    *
4773    * Inputs:
4774    *   O0   - int   adler
4775    *   O1   - byte* buff
4776    *   O2   - int   len
4777    *
4778    * Output:
4779    *   O0   - int adler result
4780    */
4781   address generate_updateBytesAdler32() {
4782     __ align(CodeEntryAlignment);
4783     StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32");
4784     address start = __ pc();
4785 
4786     Label L_cleanup_loop, L_cleanup_loop_check;
4787     Label L_main_loop_check, L_main_loop, L_inner_loop, L_inner_loop_check;
4788     Label L_nmax_check_done;
4789 
4790     // Aliases
4791     Register s1     = O0;
4792     Register s2     = O3;
4793     Register buff   = O1;
4794     Register len    = O2;
4795     Register temp[ADLER32_NUM_TEMPS] = {L0, L1, L2, L3, L4, L5, L6, L7, I0, I1, I2, I3, I4, I5, G3, I7};
4796 
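         // Reference formulation (illustration only): for each input byte b,
         //   s1 = (s1 + b)  mod 65521
         //   s2 = (s2 + s1) mod 65521
         // and the final checksum is (s2 << 16) | s1.  The code below defers the
         // expensive mod until at most NMAX bytes have been accumulated.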
4797     // Max number of bytes we can process before having to take the mod
4798     // 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
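         // (Check: for n = 5552, 255*n*(n+1)/2 + (n+1)*65520 = 4,294,690,200 <= 2^32-1,
         //  while n = 5553 already exceeds it.)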
4799     unsigned long NMAX = 0x15B0;
4800 
4801     // Zero-out the upper bits of len
4802     __ clruwu(len);
4803 
4804     // Create the mask 0xFFFF
4805     __ set64(0x00FFFF, O4, O5); // O5 is the temp register
4806 
4807     // s1 is initialized to the lower 16 bits of adler
4808     // s2 is initialized to the upper 16 bits of adler
4809     __ srlx(O0, 16, O5); // adler >> 16
4810     __ and3(O0, O4, s1); // s1  = (adler & 0xFFFF)
4811     __ and3(O5, O4, s2); // s2  = ((adler >> 16) & 0xFFFF)
4812 
4813     // The pipelined main loop needs at least 16 bytes per iteration.
4814     // It would check this itself, but it is more efficient to skip straight to the
4815     // cleanup loop for short inputs.  Set up the constant for the cutoff check.
4816     __ mov(15, O4);
4817 
4818     // Check whether we are above the cutoff; if not, go to the cleanup loop immediately
4819     __ cmp_and_br_short(len, O4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_loop_check);
4820 
4821     // Free up some registers for our use
4822     for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
4823       __ movxtod(temp[i], as_FloatRegister(2*i));
4824     }
4825 
4826     // Loop maintenance stuff is done at the end of the loop, so skip to there
4827     __ ba_short(L_main_loop_check);
4828 
4829     __ BIND(L_main_loop);
4830 
4831     // Prologue for inner loop
4832     __ ldub(buff, 0, L0);
4833     __ dec(O5);
4834 
4835     for (int i = 1; i < 8; i++) {
4836       __ ldub(buff, i, temp[i]);
4837     }
4838 
4839     __ inc(buff, 8);
4840 
4841     // The inner loop processes 16 bytes at a time and might never execute if the
4842     // outer loop has only 16 bytes left to process
4843     __ ba_short(L_inner_loop_check);
4844 
4845     __ BIND(L_inner_loop);
4846 
4847     for (int i = 0; i < 8; i++) {
4848       __ ldub(buff, (2*i), temp[(8+(2*i)) % ADLER32_NUM_TEMPS]);
4849       __ add(s1, temp[i], s1);
4850       __ ldub(buff, (2*i)+1, temp[(8+(2*i)+1) % ADLER32_NUM_TEMPS]);
4851       __ add(s2, s1, s2);
4852     }
4853 
4854     // Original temp 0-7 used and new loads to temp 0-7 issued
4855     // temp 8-15 ready to be consumed
4856     __ add(s1, I0, s1);
4857     __ dec(O5);
4858     __ add(s2, s1, s2);
4859     __ add(s1, I1, s1);
4860     __ inc(buff, 16);
4861     __ add(s2, s1, s2);
4862 
4863     for (int i = 0; i < 6; i++) {
4864       __ add(s1, temp[10+i], s1);
4865       __ add(s2, s1, s2);
4866     }
4867 
4868     __ BIND(L_inner_loop_check);
4869     __ nop();
4870     __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_inner_loop);
4871 
4872     // Epilogue
4873     for (int i = 0; i < 4; i++) {
4874       __ ldub(buff, (2*i), temp[8+(2*i)]);
4875       __ add(s1, temp[i], s1);
4876       __ ldub(buff, (2*i)+1, temp[8+(2*i)+1]);
4877       __ add(s2, s1, s2);
4878     }
4879 
4880     __ add(s1, temp[4], s1);
4881     __ inc(buff, 8);
4882 
4883     for (int i = 0; i < 11; i++) {
4884       __ add(s2, s1, s2);
4885       __ add(s1, temp[5+i], s1);
4886     }
4887 
4888     __ add(s2, s1, s2);
4889 
4890     // Take the mod for s1 and s2
4891     __ set64(0xFFF1, L0, L1);
4892     __ udivx(s1, L0, L1);
4893     __ udivx(s2, L0, L2);
4894     __ mulx(L0, L1, L1);
4895     __ mulx(L0, L2, L2);
4896     __ sub(s1, L1, s1);
4897     __ sub(s2, L2, s2);
4898 
4899     // Make sure there is something left to process
4900     __ BIND(L_main_loop_check);
4901     __ set64(NMAX, L0, L1);
4902     // k = len < NMAX ? len : NMAX
4903     __ cmp_and_br_short(len, L0, Assembler::greaterEqualUnsigned, Assembler::pt, L_nmax_check_done);
4904     __ andn(len, 0x0F, L0); // round len down to a multiple of 16 bytes
4905     __ BIND(L_nmax_check_done);
4906     __ mov(L0, O5);
4907     __ sub(len, L0, len); // len -= k
4908 
4909     __ srlx(O5, 4, O5); // number of 16-byte chunks
4910     __ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_main_loop);
4911 
4912     // Restore the registers we saved, take the mod one last time,
4913     // then combine the two halves and return
4914     for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
4915       __ movdtox(as_FloatRegister(2*i), temp[i]);
4916     }
4917 
4918     // There might be nothing left to process
4919     __ ba_short(L_cleanup_loop_check);
4920 
4921     __ BIND(L_cleanup_loop);
4922     __ ldub(buff, 0, O4); // load single byte from buffer
4923     __ inc(buff); // buff++
4924     __ add(s1, O4, s1); // s1 += *buff++;
4925     __ dec(len); // len--
4926     __ add(s1, s2, s2); // s2 += s1;
4927     __ BIND(L_cleanup_loop_check);
4928     __ nop();
4929     __ cmp_and_br_short(len, 0, Assembler::notEqual, Assembler::pt, L_cleanup_loop);
4930 
4931     // Take the mod one last time
4932     __ set64(0xFFF1, O1, O2);
4933     __ udivx(s1, O1, O2);
4934     __ udivx(s2, O1, O5);
4935     __ mulx(O1, O2, O2);
4936     __ mulx(O1, O5, O5);
4937     __ sub(s1, O2, s1);
4938     __ sub(s2, O5, s2);
4939 
4940     // Combine lower bits and higher bits
4941     __ sllx(s2, 16, s2); // s2 = s2 << 16
4942     __ or3(s1, s2, s1);  // adler = s2 | s1
4943     // Final return value is in O0
4944     __ retl();
4945     __ delayed()->nop();
4946 
4947     return start;
4948   }
4949 
4950   /**
4951    *  Arguments:
4952    *
4953    * Inputs:
4954    *   O0   - int   crc
4955    *   O1   - byte* buf
4956    *   O2   - int   len
4957    *   O3   - int*  table
4958    *
4959    * Output:
4960    *   O0   - int crc result
4961    */
4962   address generate_updateBytesCRC32() {
4963     assert(UseCRC32Intrinsics, "need VIS3 instructions");
4964 
4965     __ align(CodeEntryAlignment);
4966     StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
4967     address start = __ pc();
4968 
4969     const Register crc   = O0; // crc
4970     const Register buf   = O1; // source java byte array address
4971     const Register len   = O2; // length
4972     const Register table = O3; // crc_table address (reuse register)
4973 
4974     __ kernel_crc32(crc, buf, len, table);
4975 
4976     __ retl();
4977     __ delayed()->nop();
4978 
4979     return start;
4980   }
4981 
4982   void generate_initial() {
4983     // Generates the initial stubs and initializes the entry points
4984 
4985     //------------------------------------------------------------------------------------------------------------------------
4986     // entry points that exist in all platforms
4987     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
4988     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
4989     StubRoutines::_forward_exception_entry                 = generate_forward_exception();
4990 
4991     StubRoutines::_call_stub_entry                         = generate_call_stub(StubRoutines::_call_stub_return_address);
4992     StubRoutines::_catch_exception_entry                   = generate_catch_exception();
4993 
4994     //------------------------------------------------------------------------------------------------------------------------
4995     // entry points that are platform specific
4996     StubRoutines::Sparc::_test_stop_entry                  = generate_test_stop();
4997 
4998     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
4999     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
5000 
5001     // Build this early so it's available for the interpreter.
5002     StubRoutines::_throw_StackOverflowError_entry =
5003             generate_throw_exception("StackOverflowError throw_exception",
5004             CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
5005     StubRoutines::_throw_delayed_StackOverflowError_entry =
5006             generate_throw_exception("delayed StackOverflowError throw_exception",
5007             CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
5008 
5009     if (UseCRC32Intrinsics) {
5010       // set table address before generating the stubs that use it
5011       StubRoutines::_crc_table_adr = (address)StubRoutines::Sparc::_crc_table;
5012       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
5013     }
5014 
5015     if (UseCRC32CIntrinsics) {
5016       // set table address before generating the stubs that use it
5017       StubRoutines::_crc32c_table_addr = (address)StubRoutines::Sparc::_crc32c_table;
5018       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
5019     }
5020   }
5021 
5022 
5023   void generate_all() {
5024     // Generates all stubs and initializes the entry points
5025 
5026     // Generate partial_subtype_check first here since its code depends on
5027     // UseZeroBaseCompressedOops which is defined after heap initialization.
5028     StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
5029     // These entry points require SharedInfo::stack0 to be set up in non-core builds
5030     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
5031     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
5032     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
5033 
5034     // support for verify_oop (must happen after universe_init)
5035     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop_subroutine();
5036 
5037     // arraycopy stubs used by compilers
5038     generate_arraycopy_stubs();
5039 
5040     // Don't initialize the platform math functions since sparc
5041     // doesn't have intrinsics for these operations.
5042 
5043     // Safefetch stubs.
5044     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
5045                                                        &StubRoutines::_safefetch32_fault_pc,
5046                                                        &StubRoutines::_safefetch32_continuation_pc);
5047     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
5048                                                        &StubRoutines::_safefetchN_fault_pc,
5049                                                        &StubRoutines::_safefetchN_continuation_pc);
5050 
5051     // generate AES intrinsics code
5052     if (UseAESIntrinsics) {
5053       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
5054       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
5055       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
5056       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
5057     }
5058     // generate GHASH intrinsics code
5059     if (UseGHASHIntrinsics) {
5060       StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
5061     }
5062 
5063     // generate SHA1/SHA256/SHA512 intrinsics code
5064     if (UseSHA1Intrinsics) {
5065       StubRoutines::_sha1_implCompress     = generate_sha1_implCompress(false,   "sha1_implCompress");
5066       StubRoutines::_sha1_implCompressMB   = generate_sha1_implCompress(true,    "sha1_implCompressMB");
5067     }
5068     if (UseSHA256Intrinsics) {
5069       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
5070       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
5071     }
5072     if (UseSHA512Intrinsics) {
5073       StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
5074       StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
5075     }
5076     // generate Adler32 intrinsics code
5077     if (UseAdler32Intrinsics) {
5078       StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
5079     }
5080   }
5081 
5082 
5083  public:
5084   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
5085     // replace the standard masm with a special one:
5086     _masm = new MacroAssembler(code);
5087 
5088     _stub_count = !all ? 0x100 : 0x200;
5089     if (all) {
5090       generate_all();
5091     } else {
5092       generate_initial();
5093     }
5094 
5095     // make sure this stub is available for all local calls
5096     if (_atomic_add_stub.is_unbound()) {
5097       // generate a second time, if necessary
5098       (void) generate_atomic_add();
5099     }
5100   }
5101 
5102 
5103  private:
5104   int _stub_count;
5105   void stub_prolog(StubCodeDesc* cdesc) {
5106     # ifdef ASSERT
5107       // put extra information in the stub code, to make it more readable
5108       // Write the high part of the address
5109       // [RGV] Check if there is a dependency on the size of this prolog
5110       __ emit_data((intptr_t)cdesc >> 32,    relocInfo::none);
5111       __ emit_data((intptr_t)cdesc,    relocInfo::none);
5112       __ emit_data(++_stub_count, relocInfo::none);
5113     # endif
5114     align(true);
5115   }
5116 
5117   void align(bool at_header = false) {
5118     // %%%%% move this constant somewhere else
5119     // UltraSPARC cache line size is 8 instructions:
5120     const unsigned int icache_line_size = 32;
5121     const unsigned int icache_half_line_size = 16;
5122 
5123     if (at_header) {
5124       while ((intptr_t)(__ pc()) % icache_line_size != 0) {
5125         __ emit_data(0, relocInfo::none);
5126       }
5127     } else {
5128       while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
5129         __ nop();
5130       }
5131     }
5132   }
5133 
5134 }; // end class declaration
5135 
5136 void StubGenerator_generate(CodeBuffer* code, bool all) {
5137   StubGenerator g(code, all);
5138 }