1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "gc/shared/cardTable.hpp"
  29 #include "gc/shared/cardTableBarrierSet.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "nativeInst_ppc.hpp"
  32 #include "oops/instanceOop.hpp"
  33 #include "oops/method.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/handles.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubCodeGenerator.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "utilities/align.hpp"
  44 
  45 // Declaration and definition of StubGenerator (no .hpp file).
  46 // For a more detailed description of the stub routine structure
  47 // see the comment in stubRoutines.hpp.
  48 
  49 #define __ _masm->
  50 
  51 #ifdef PRODUCT
  52 #define BLOCK_COMMENT(str) // nothing
  53 #else
  54 #define BLOCK_COMMENT(str) __ block_comment(str)
  55 #endif
  56 
  57 #if defined(ABI_ELFv2)
  58 #define STUB_ENTRY(name) StubRoutines::name()
  59 #else
  60 #define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
  61 #endif
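      // Note: without ABI_ELFv2 (i.e., with the ELFv1 ABI), StubRoutines::name() returns the
      // address of a function descriptor, so STUB_ENTRY reads the actual code entry point
      // from the FunctionDescriptor.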
  62 
  63 class StubGenerator: public StubCodeGenerator {
  64  private:
  65 
  66   // Call stubs are used to call Java from C
  67   //
  68   // Arguments:
  69   //
  70   //   R3  - call wrapper address     : address
  71   //   R4  - result                   : intptr_t*
  72   //   R5  - result type              : BasicType
  73   //   R6  - method                   : Method
  74   //   R7  - frame mgr entry point    : address
  75   //   R8  - parameter block          : intptr_t*
  76   //   R9  - parameter count in words : int
  77   //   R10 - thread                   : Thread*
  78   //
  79   address generate_call_stub(address& return_address) {
   80     // Set up a new C frame, copy the Java arguments, call the frame manager or
   81     // native_entry, and process the result.
  82 
  83     StubCodeMark mark(this, "StubRoutines", "call_stub");
  84 
  85     address start = __ function_entry();
  86 
  87     // some sanity checks
  88     assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
  89     assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
  90     assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
  91     assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
  92     assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");
  93 
  94     Register r_arg_call_wrapper_addr        = R3;
  95     Register r_arg_result_addr              = R4;
  96     Register r_arg_result_type              = R5;
  97     Register r_arg_method                   = R6;
  98     Register r_arg_entry                    = R7;
  99     Register r_arg_thread                   = R10;
 100 
 101     Register r_temp                         = R24;
 102     Register r_top_of_arguments_addr        = R25;
 103     Register r_entryframe_fp                = R26;
 104 
 105     {
 106       // Stack on entry to call_stub:
 107       //
 108       //      F1      [C_FRAME]
 109       //              ...
 110 
 111       Register r_arg_argument_addr          = R8;
 112       Register r_arg_argument_count         = R9;
 113       Register r_frame_alignment_in_bytes   = R27;
 114       Register r_argument_addr              = R28;
 115       Register r_argumentcopy_addr          = R29;
 116       Register r_argument_size_in_bytes     = R30;
 117       Register r_frame_size                 = R23;
 118 
 119       Label arguments_copied;
 120 
 121       // Save LR/CR to caller's C_FRAME.
 122       __ save_LR_CR(R0);
 123 
 124       // Zero extend arg_argument_count.
 125       __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);
 126 
  127       // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
 128       __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
 129 
 130       // Keep copy of our frame pointer (caller's SP).
 131       __ mr(r_entryframe_fp, R1_SP);
 132 
 133       BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
 134       // Push ENTRY_FRAME including arguments:
 135       //
 136       //      F0      [TOP_IJAVA_FRAME_ABI]
 137       //              alignment (optional)
 138       //              [outgoing Java arguments]
 139       //              [ENTRY_FRAME_LOCALS]
 140       //      F1      [C_FRAME]
 141       //              ...
 142 
 143       // calculate frame size
 144 
 145       // unaligned size of arguments
 146       __ sldi(r_argument_size_in_bytes,
 147                   r_arg_argument_count, Interpreter::logStackElementSize);
 148       // arguments alignment (max 1 slot)
 149       // FIXME: use round_to() here
 150       __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
 151       __ sldi(r_frame_alignment_in_bytes,
 152               r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
 153 
 154       // size = unaligned size of arguments + top abi's size
 155       __ addi(r_frame_size, r_argument_size_in_bytes,
 156               frame::top_ijava_frame_abi_size);
 157       // size += arguments alignment
 158       __ add(r_frame_size,
 159              r_frame_size, r_frame_alignment_in_bytes);
 160       // size += size of call_stub locals
 161       __ addi(r_frame_size,
 162               r_frame_size, frame::entry_frame_locals_size);
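            // Altogether: frame_size = align_up(arg_count * wordSize, 16)
            //                          + top_ijava_frame_abi_size + entry_frame_locals_size.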
 163 
 164       // push ENTRY_FRAME
 165       __ push_frame(r_frame_size, r_temp);
 166 
 167       // initialize call_stub locals (step 1)
 168       __ std(r_arg_call_wrapper_addr,
 169              _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
 170       __ std(r_arg_result_addr,
 171              _entry_frame_locals_neg(result_address), r_entryframe_fp);
 172       __ std(r_arg_result_type,
 173              _entry_frame_locals_neg(result_type), r_entryframe_fp);
 174       // we will save arguments_tos_address later
 175 
 176 
 177       BLOCK_COMMENT("Copy Java arguments");
 178       // copy Java arguments
 179 
  180       // Calculate top_of_arguments_addr which will be R15_esp (not prepushed) later.
 181       // FIXME: why not simply use SP+frame::top_ijava_frame_size?
 182       __ addi(r_top_of_arguments_addr,
 183               R1_SP, frame::top_ijava_frame_abi_size);
 184       __ add(r_top_of_arguments_addr,
 185              r_top_of_arguments_addr, r_frame_alignment_in_bytes);
 186 
 187       // any arguments to copy?
 188       __ cmpdi(CCR0, r_arg_argument_count, 0);
 189       __ beq(CCR0, arguments_copied);
 190 
 191       // prepare loop and copy arguments in reverse order
 192       {
 193         // init CTR with arg_argument_count
 194         __ mtctr(r_arg_argument_count);
 195 
  196         // let r_argumentcopy_addr point to last outgoing Java argument
 197         __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);
 198 
 199         // let r_argument_addr point to last incoming java argument
 200         __ add(r_argument_addr,
 201                    r_arg_argument_addr, r_argument_size_in_bytes);
 202         __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
 203 
 204         // now loop while CTR > 0 and copy arguments
 205         {
 206           Label next_argument;
 207           __ bind(next_argument);
 208 
 209           __ ld(r_temp, 0, r_argument_addr);
 210           // argument_addr--;
 211           __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
 212           __ std(r_temp, 0, r_argumentcopy_addr);
 213           // argumentcopy_addr++;
 214           __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);
 215 
 216           __ bdnz(next_argument);
 217         }
 218       }
 219 
 220       // Arguments copied, continue.
 221       __ bind(arguments_copied);
 222     }
 223 
 224     {
 225       BLOCK_COMMENT("Call frame manager or native entry.");
 226       // Call frame manager or native entry.
 227       Register r_new_arg_entry = R14;
 228       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
 229                                  r_arg_method, r_arg_thread);
 230 
 231       __ mr(r_new_arg_entry, r_arg_entry);
 232 
 233       // Register state on entry to frame manager / native entry:
 234       //
 235       //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
 236       //   R19_method  -  Method
 237       //   R16_thread  -  JavaThread*
 238 
 239       // Tos must point to last argument - element_size.
 240       const Register tos = R15_esp;
 241 
 242       __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
 243 
 244       // initialize call_stub locals (step 2)
 245       // now save tos as arguments_tos_address
 246       __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);
 247 
 248       // load argument registers for call
 249       __ mr(R19_method, r_arg_method);
 250       __ mr(R16_thread, r_arg_thread);
 251       assert(tos != r_arg_method, "trashed r_arg_method");
 252       assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
 253 
  254       // Set up R25_templateTableBase (the interpreter's dispatch table) for the callee.
 255       __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
 256       // Stack on entry to frame manager / native entry:
 257       //
 258       //      F0      [TOP_IJAVA_FRAME_ABI]
 259       //              alignment (optional)
 260       //              [outgoing Java arguments]
 261       //              [ENTRY_FRAME_LOCALS]
 262       //      F1      [C_FRAME]
 263       //              ...
 264       //
 265 
 266       // global toc register
 267       __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
  268       // Remember the senderSP so the interpreter can pop c2i arguments off the stack
  269       // when called via a c2i adapter.
 270 
  271       // Pass initial_caller_sp to the frame manager.
 272       __ mr(R21_tmp1, R1_SP);
 273 
  274       // Do a light-weight C-call here. r_new_arg_entry holds the address
  275       // of the interpreter entry point (frame manager or native entry),
  276       // and the runtime value of LR is saved in return_address.
 277       assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
 278              "trashed r_new_arg_entry");
 279       return_address = __ call_stub(r_new_arg_entry);
 280     }
 281 
 282     {
 283       BLOCK_COMMENT("Returned from frame manager or native entry.");
 284       // Returned from frame manager or native entry.
 285       // Now pop frame, process result, and return to caller.
 286 
 287       // Stack on exit from frame manager / native entry:
 288       //
 289       //      F0      [ABI]
 290       //              ...
 291       //              [ENTRY_FRAME_LOCALS]
 292       //      F1      [C_FRAME]
 293       //              ...
 294       //
 295       // Just pop the topmost frame ...
 296       //
 297 
 298       Label ret_is_object;
 299       Label ret_is_long;
 300       Label ret_is_float;
 301       Label ret_is_double;
 302 
 303       Register r_entryframe_fp = R30;
 304       Register r_lr            = R7_ARG5;
 305       Register r_cr            = R8_ARG6;
 306 
 307       // Reload some volatile registers which we've spilled before the call
 308       // to frame manager / native entry.
 309       // Access all locals via frame pointer, because we know nothing about
 310       // the topmost frame's size.
 311       __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
 312       assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
 313       __ ld(r_arg_result_addr,
 314             _entry_frame_locals_neg(result_address), r_entryframe_fp);
 315       __ ld(r_arg_result_type,
 316             _entry_frame_locals_neg(result_type), r_entryframe_fp);
 317       __ ld(r_cr, _abi(cr), r_entryframe_fp);
 318       __ ld(r_lr, _abi(lr), r_entryframe_fp);
 319 
 320       // pop frame and restore non-volatiles, LR and CR
 321       __ mr(R1_SP, r_entryframe_fp);
 322       __ mtcr(r_cr);
 323       __ mtlr(r_lr);
 324 
 325       // Store result depending on type. Everything that is not
 326       // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
 327       __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
 328       __ cmpwi(CCR1, r_arg_result_type, T_LONG);
 329       __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
 330       __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
 331 
 332       // restore non-volatile registers
 333       __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
 334 
 335 
 336       // Stack on exit from call_stub:
 337       //
 338       //      0       [C_FRAME]
 339       //              ...
 340       //
 341       //  no call_stub frames left.
 342 
 343       // All non-volatiles have been restored at this point!!
 344       assert(R3_RET == R3, "R3_RET should be R3");
 345 
 346       __ beq(CCR0, ret_is_object);
 347       __ beq(CCR1, ret_is_long);
 348       __ beq(CCR5, ret_is_float);
 349       __ beq(CCR6, ret_is_double);
 350 
 351       // default:
 352       __ stw(R3_RET, 0, r_arg_result_addr);
 353       __ blr(); // return to caller
 354 
 355       // case T_OBJECT:
 356       __ bind(ret_is_object);
 357       __ std(R3_RET, 0, r_arg_result_addr);
 358       __ blr(); // return to caller
 359 
 360       // case T_LONG:
 361       __ bind(ret_is_long);
 362       __ std(R3_RET, 0, r_arg_result_addr);
 363       __ blr(); // return to caller
 364 
 365       // case T_FLOAT:
 366       __ bind(ret_is_float);
 367       __ stfs(F1_RET, 0, r_arg_result_addr);
 368       __ blr(); // return to caller
 369 
 370       // case T_DOUBLE:
 371       __ bind(ret_is_double);
 372       __ stfd(F1_RET, 0, r_arg_result_addr);
 373       __ blr(); // return to caller
 374     }
 375 
 376     return start;
 377   }
 378 
 379   // Return point for a Java call if there's an exception thrown in
 380   // Java code.  The exception is caught and transformed into a
 381   // pending exception stored in JavaThread that can be tested from
 382   // within the VM.
 383   //
 384   address generate_catch_exception() {
 385     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 386 
 387     address start = __ pc();
 388 
 389     // Registers alive
 390     //
 391     //  R16_thread
 392     //  R3_ARG1 - address of pending exception
 393     //  R4_ARG2 - return address in call stub
 394 
 395     const Register exception_file = R21_tmp1;
 396     const Register exception_line = R22_tmp2;
 397 
 398     __ load_const(exception_file, (void*)__FILE__);
 399     __ load_const(exception_line, (void*)__LINE__);
 400 
 401     __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
 402     // store into `char *'
 403     __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
 404     // store into `int'
 405     __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);
 406 
 407     // complete return to VM
 408     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
 409 
 410     __ mtlr(R4_ARG2);
 411     // continue in call stub
 412     __ blr();
 413 
 414     return start;
 415   }
 416 
 417   // Continuation point for runtime calls returning with a pending
 418   // exception.  The pending exception check happened in the runtime
 419   // or native call stub.  The pending exception in Thread is
 420   // converted into a Java-level exception.
 421   //
 422   // Read:
 423   //
 424   //   LR:     The pc the runtime library callee wants to return to.
 425   //           Since the exception occurred in the callee, the return pc
 426   //           from the point of view of Java is the exception pc.
 427   //   thread: Needed for method handles.
 428   //
 429   // Invalidate:
 430   //
 431   //   volatile registers (except below).
 432   //
 433   // Update:
 434   //
 435   //   R4_ARG2: exception
 436   //
 437   // (LR is unchanged and is live out).
 438   //
 439   address generate_forward_exception() {
 440     StubCodeMark mark(this, "StubRoutines", "forward_exception");
 441     address start = __ pc();
 442 
 443 #if !defined(PRODUCT)
 444     if (VerifyOops) {
 445       // Get pending exception oop.
 446       __ ld(R3_ARG1,
 447                 in_bytes(Thread::pending_exception_offset()),
 448                 R16_thread);
 449       // Make sure that this code is only executed if there is a pending exception.
 450       {
 451         Label L;
 452         __ cmpdi(CCR0, R3_ARG1, 0);
 453         __ bne(CCR0, L);
 454         __ stop("StubRoutines::forward exception: no pending exception (1)");
 455         __ bind(L);
 456       }
 457       __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
 458     }
 459 #endif
 460 
 461     // Save LR/CR and copy exception pc (LR) into R4_ARG2.
 462     __ save_LR_CR(R4_ARG2);
 463     __ push_frame_reg_args(0, R0);
 464     // Find exception handler.
 465     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
 466                      SharedRuntime::exception_handler_for_return_address),
 467                     R16_thread,
 468                     R4_ARG2);
 469     // Copy handler's address.
 470     __ mtctr(R3_RET);
 471     __ pop_frame();
 472     __ restore_LR_CR(R0);
 473 
 474     // Set up the arguments for the exception handler:
 475     //  - R3_ARG1: exception oop
 476     //  - R4_ARG2: exception pc.
 477 
 478     // Load pending exception oop.
 479     __ ld(R3_ARG1,
 480               in_bytes(Thread::pending_exception_offset()),
 481               R16_thread);
 482 
 483     // The exception pc is the return address in the caller.
 484     // Must load it into R4_ARG2.
 485     __ mflr(R4_ARG2);
 486 
 487 #ifdef ASSERT
 488     // Make sure exception is set.
 489     {
 490       Label L;
 491       __ cmpdi(CCR0, R3_ARG1, 0);
 492       __ bne(CCR0, L);
 493       __ stop("StubRoutines::forward exception: no pending exception (2)");
 494       __ bind(L);
 495     }
 496 #endif
 497 
 498     // Clear the pending exception.
 499     __ li(R0, 0);
 500     __ std(R0,
 501                in_bytes(Thread::pending_exception_offset()),
 502                R16_thread);
 503     // Jump to exception handler.
 504     __ bctr();
 505 
 506     return start;
 507   }
 508 
 509 #undef __
 510 #define __ masm->
 511   // Continuation point for throwing of implicit exceptions that are
 512   // not handled in the current activation. Fabricates an exception
 513   // oop and initiates normal exception dispatching in this
 514   // frame. Only callee-saved registers are preserved (through the
 515   // normal register window / RegisterMap handling).  If the compiler
 516   // needs all registers to be preserved between the fault point and
 517   // the exception handler then it must assume responsibility for that
 518   // in AbstractCompiler::continuation_for_implicit_null_exception or
 519   // continuation_for_implicit_division_by_zero_exception. All other
 520   // implicit exceptions (e.g., NullPointerException or
 521   // AbstractMethodError on entry) are either at call sites or
 522   // otherwise assume that stack unwinding will be initiated, so
 523   // caller saved registers were assumed volatile in the compiler.
 524   //
 525   // Note that we generate only this stub into a RuntimeStub, because
 526   // it needs to be properly traversed and ignored during GC, so we
 527   // change the meaning of the "__" macro within this method.
 528   //
 529   // Note: the routine set_pc_not_at_call_for_caller in
 530   // SharedRuntime.cpp requires that this code be generated into a
 531   // RuntimeStub.
 532   address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
 533                                    Register arg1 = noreg, Register arg2 = noreg) {
 534     CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
 535     MacroAssembler* masm = new MacroAssembler(&code);
 536 
 537     OopMapSet* oop_maps  = new OopMapSet();
 538     int frame_size_in_bytes = frame::abi_reg_args_size;
 539     OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
 540 
 541     address start = __ pc();
 542 
 543     __ save_LR_CR(R11_scratch1);
 544 
 545     // Push a frame.
 546     __ push_frame_reg_args(0, R11_scratch1);
 547 
 548     address frame_complete_pc = __ pc();
 549 
 550     if (restore_saved_exception_pc) {
 551       __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
 552     }
 553 
 554     // Note that we always have a runtime stub frame on the top of
 555     // stack by this point. Remember the offset of the instruction
 556     // whose address will be moved to R11_scratch1.
 557     address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
 558 
 559     __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
 560 
 561     __ mr(R3_ARG1, R16_thread);
 562     if (arg1 != noreg) {
 563       __ mr(R4_ARG2, arg1);
 564     }
 565     if (arg2 != noreg) {
 566       __ mr(R5_ARG3, arg2);
 567     }
 568 #if defined(ABI_ELFv2)
 569     __ call_c(runtime_entry, relocInfo::none);
 570 #else
 571     __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
 572 #endif
 573 
 574     // Set an oopmap for the call site.
 575     oop_maps->add_gc_map((int)(gc_map_pc - start), map);
 576 
 577     __ reset_last_Java_frame();
 578 
 579 #ifdef ASSERT
 580     // Make sure that this code is only executed if there is a pending
 581     // exception.
 582     {
 583       Label L;
 584       __ ld(R0,
 585                 in_bytes(Thread::pending_exception_offset()),
 586                 R16_thread);
 587       __ cmpdi(CCR0, R0, 0);
 588       __ bne(CCR0, L);
 589       __ stop("StubRoutines::throw_exception: no pending exception");
 590       __ bind(L);
 591     }
 592 #endif
 593 
 594     // Pop frame.
 595     __ pop_frame();
 596 
 597     __ restore_LR_CR(R11_scratch1);
 598 
 599     __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
 600     __ mtctr(R11_scratch1);
 601     __ bctr();
 602 
 603     // Create runtime stub with OopMap.
 604     RuntimeStub* stub =
 605       RuntimeStub::new_runtime_stub(name, &code,
 606                                     /*frame_complete=*/ (int)(frame_complete_pc - start),
 607                                     frame_size_in_bytes/wordSize,
 608                                     oop_maps,
 609                                     false);
 610     return stub->entry_point();
 611   }
 612 #undef __
 613 #define __ _masm->
 614 
 615   //  Generate G1 pre-write barrier for array.
 616   //
 617   //  Input:
 618   //     from     - register containing src address (only needed for spilling)
 619   //     to       - register containing starting address
 620   //     count    - register containing element count
 621   //     tmp      - scratch register
 622   //
 623   //  Kills:
 624   //     nothing
 625   //
 626   void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1,
 627                                        Register preserve1 = noreg, Register preserve2 = noreg) {
 628     BarrierSet* const bs = Universe::heap()->barrier_set();
 629     switch (bs->kind()) {
 630       case BarrierSet::G1BarrierSet:
  631         // With G1, don't generate the call if we statically know that the target is uninitialized.
 632         if (!dest_uninitialized) {
 633           int spill_slots = 3;
 634           if (preserve1 != noreg) { spill_slots++; }
 635           if (preserve2 != noreg) { spill_slots++; }
 636           const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
 637           Label filtered;
 638 
 639           // Is marking active?
 640           if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
 641             __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
 642           } else {
 643             guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
 644             __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
 645           }
 646           __ cmpdi(CCR0, Rtmp1, 0);
 647           __ beq(CCR0, filtered);
 648 
 649           __ save_LR_CR(R0);
 650           __ push_frame(frame_size, R0);
 651           int slot_nr = 0;
 652           __ std(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
 653           __ std(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
 654           __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
 655           if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
 656           if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
 657 
 658           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
 659 
 660           slot_nr = 0;
 661           __ ld(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
 662           __ ld(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
 663           __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
 664           if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
 665           if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
 666           __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
 667           __ restore_LR_CR(R0);
 668 
 669           __ bind(filtered);
 670         }
 671         break;
 672       case BarrierSet::CardTableBarrierSet:
 673         break;
 674       default:
 675         ShouldNotReachHere();
 676     }
 677   }
 678 
 679   //  Generate CMS/G1 post-write barrier for array.
 680   //
 681   //  Input:
 682   //     addr     - register containing starting address
 683   //     count    - register containing element count
 684   //     tmp      - scratch register
 685   //
 686   //  The input registers and R0 are overwritten.
 687   //
 688   void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) {
 689     BarrierSet* const bs = Universe::heap()->barrier_set();
 690 
 691     switch (bs->kind()) {
 692       case BarrierSet::G1BarrierSet:
 693         {
 694           int spill_slots = (preserve != noreg) ? 1 : 0;
 695           const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
 696 
 697           __ save_LR_CR(R0);
 698           __ push_frame(frame_size, R0);
 699           if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
 700           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
 701           if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
 702           __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
 703           __ restore_LR_CR(R0);
 704         }
 705         break;
 706       case BarrierSet::CardTableBarrierSet:
 707         {
 708           Label Lskip_loop, Lstore_loop;
 709           if (UseConcMarkSweepGC) {
 710             // TODO PPC port: contribute optimization / requires shared changes
 711             __ release();
 712           }
 713 
 714           CardTableBarrierSet* const ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
 715           CardTable* const ct = ctbs->card_table();
 716           assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 717           assert_different_registers(addr, count, tmp);
 718 
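                // Dirty the cards spanned by [addr, addr + count * BytesPerHeapOop):
                // compute the card indexes of the first and last covered oop; their
                // difference + 1 is the number of card bytes to set to 0 (dirty).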
 719           __ sldi(count, count, LogBytesPerHeapOop);
 720           __ addi(count, count, -BytesPerHeapOop);
 721           __ add(count, addr, count);
 722           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
 723           __ srdi(addr, addr, CardTable::card_shift);
 724           __ srdi(count, count, CardTable::card_shift);
 725           __ subf(count, addr, count);
 726           assert_different_registers(R0, addr, count, tmp);
 727           __ load_const(tmp, (address)ct->byte_map_base());
 728           __ addic_(count, count, 1);
 729           __ beq(CCR0, Lskip_loop);
 730           __ li(R0, 0);
 731           __ mtctr(count);
 732           // Byte store loop
 733           __ bind(Lstore_loop);
 734           __ stbx(R0, tmp, addr);
 735           __ addi(addr, addr, 1);
 736           __ bdnz(Lstore_loop);
 737           __ bind(Lskip_loop);
 738         }
  739         break;
 740       case BarrierSet::ModRef:
 741         break;
 742       default:
 743         ShouldNotReachHere();
 744     }
 745   }
 746 
 747   // Support for void zero_words_aligned8(HeapWord* to, size_t count)
 748   //
 749   // Arguments:
  750   //   to:    address of the memory to clear (must be 8-byte aligned)
  751   //   count: number of dwords (heap words) to clear
 752   //
 753   // Destroys:
 754   //
 755   address generate_zero_words_aligned8() {
 756     StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
 757 
 758     // Implemented as in ClearArray.
 759     address start = __ function_entry();
 760 
 761     Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
 762     Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
 763     Register tmp1_reg       = R5_ARG3;
 764     Register tmp2_reg       = R6_ARG4;
 765     Register zero_reg       = R7_ARG5;
 766 
 767     // Procedure for large arrays (uses data cache block zero instruction).
 768     Label dwloop, fast, fastloop, restloop, lastdword, done;
 769     int cl_size = VM_Version::L1_data_cache_line_size();
 770     int cl_dwords = cl_size >> 3;
 771     int cl_dwordaddr_bits = exact_log2(cl_dwords);
 772     int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
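          // Outline: (1) clear dword-by-dword up to the next cache line boundary (dwloop),
          // (2) clear whole cache lines with dcbz (fastloop), (3) clear the remaining dwords
          // in pairs (restloop) plus a possible trailing dword (lastdword). Small sizes
          // skip (1) and (2) and go straight to (3).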
 773 
 774     // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
 775     __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
 776     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
 777     __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
 778     __ load_const_optimized(zero_reg, 0L);      // Use as zero register.
 779 
 780     __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
 781     __ beq(CCR0, lastdword);                    // size <= 1
 782     __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
 783     __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
 784     __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000
 785 
 786     __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
 787     __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.
 788 
 789     __ beq(CCR0, fast);                         // already 128byte aligned
 790     __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
 791     __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)
 792 
 793     // Clear in first cache line dword-by-dword if not already 128byte aligned.
 794     __ bind(dwloop);
 795       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
 796       __ addi(base_ptr_reg, base_ptr_reg, 8);
 797     __ bdnz(dwloop);
 798 
 799     // clear 128byte blocks
 800     __ bind(fast);
 801     __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
 802     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even
 803 
 804     __ mtctr(tmp1_reg);                         // load counter
 805     __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
 806     __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords
 807 
 808     __ bind(fastloop);
 809       __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
 810       __ addi(base_ptr_reg, base_ptr_reg, cl_size);
 811     __ bdnz(fastloop);
 812 
 813     //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
 814     __ beq(CCR0, lastdword);                    // rest<=1
 815     __ mtctr(tmp1_reg);                         // load counter
 816 
 817     // Clear rest.
 818     __ bind(restloop);
 819       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
 820       __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
 821       __ addi(base_ptr_reg, base_ptr_reg, 16);
 822     __ bdnz(restloop);
 823 
 824     __ bind(lastdword);
 825     __ beq(CCR1, done);
 826     __ std(zero_reg, 0, base_ptr_reg);
 827     __ bind(done);
 828     __ blr();                                   // return
 829 
 830     return start;
 831   }
 832 
 833 #if !defined(PRODUCT)
 834   // Wrapper which calls oopDesc::is_oop_or_null()
 835   // Only called by MacroAssembler::verify_oop
 836   static void verify_oop_helper(const char* message, oop o) {
 837     if (!oopDesc::is_oop_or_null(o)) {
 838       fatal("%s", message);
 839     }
 840     ++ StubRoutines::_verify_oop_count;
 841   }
 842 #endif
 843 
 844   // Return address of code to be called from code generated by
 845   // MacroAssembler::verify_oop.
 846   //
 847   // Don't generate, rather use C++ code.
 848   address generate_verify_oop() {
 849     // this is actually a `FunctionDescriptor*'.
 850     address start = 0;
 851 
 852 #if !defined(PRODUCT)
 853     start = CAST_FROM_FN_PTR(address, verify_oop_helper);
 854 #endif
 855 
 856     return start;
 857   }
 858 
 859   // Fairer handling of safepoints for native methods.
 860   //
 861   // Generate code which reads from the polling page. This special handling is needed as the
 862   // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
 863   // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
 864   // to read from the safepoint polling page.
 865   address generate_load_from_poll() {
 866     StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
 867     address start = __ function_entry();
  868     __ unimplemented("StubRoutines::load_from_poll", 95);  // TODO PPC port
 869     return start;
 870   }
 871 
 872   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
 873   //
  874   // The code is implemented (ported from SPARC) because we believe it benefits JVM98; however,
  875   // tracing (-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all!
 876   //
 877   // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
 878   // for turning on loop predication optimization, and hence the behavior of "array range check"
 879   // and "loop invariant check" could be influenced, which potentially boosted JVM98.
 880   //
 881   // Generate stub for disjoint short fill. If "aligned" is true, the
 882   // "to" address is assumed to be heapword aligned.
 883   //
 884   // Arguments for generated stub:
 885   //   to:    R3_ARG1
 886   //   value: R4_ARG2
 887   //   count: R5_ARG3 treated as signed
 888   //
 889   address generate_fill(BasicType t, bool aligned, const char* name) {
 890     StubCodeMark mark(this, "StubRoutines", name);
 891     address start = __ function_entry();
 892 
 893     const Register to    = R3_ARG1;   // source array address
 894     const Register value = R4_ARG2;   // fill value
 895     const Register count = R5_ARG3;   // elements count
 896     const Register temp  = R6_ARG4;   // temp register
 897 
 898     //assert_clean_int(count, O3);    // Make sure 'count' is clean int.
 899 
 900     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
 901     Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;
 902 
 903     int shift = -1;
 904     switch (t) {
 905        case T_BYTE:
 906         shift = 2;
 907         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
 908         __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
 909         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 910         __ blt(CCR0, L_fill_elements);
 911         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
 912         break;
 913        case T_SHORT:
 914         shift = 1;
 915         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
 916         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
 917         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 918         __ blt(CCR0, L_fill_elements);
 919         break;
 920       case T_INT:
 921         shift = 0;
 922         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 923         __ blt(CCR0, L_fill_4_bytes);
 924         break;
 925       default: ShouldNotReachHere();
 926     }
 927 
 928     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
 929       // Align source address at 4 bytes address boundary.
 930       if (t == T_BYTE) {
 931         // One byte misalignment happens only for byte arrays.
 932         __ andi_(temp, to, 1);
 933         __ beq(CCR0, L_skip_align1);
 934         __ stb(value, 0, to);
 935         __ addi(to, to, 1);
 936         __ addi(count, count, -1);
 937         __ bind(L_skip_align1);
 938       }
 939       // Two bytes misalignment happens only for byte and short (char) arrays.
 940       __ andi_(temp, to, 2);
 941       __ beq(CCR0, L_skip_align2);
 942       __ sth(value, 0, to);
 943       __ addi(to, to, 2);
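            // 2 bytes correspond to (1 << (shift - 1)) elements: 2 for T_BYTE, 1 for T_SHORT.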
 944       __ addi(count, count, -(1 << (shift - 1)));
 945       __ bind(L_skip_align2);
 946     }
 947 
 948     if (!aligned) {
 949       // Align to 8 bytes, we know we are 4 byte aligned to start.
 950       __ andi_(temp, to, 7);
 951       __ beq(CCR0, L_fill_32_bytes);
 952       __ stw(value, 0, to);
 953       __ addi(to, to, 4);
 954       __ addi(count, count, -(1 << shift));
 955       __ bind(L_fill_32_bytes);
 956     }
 957 
 958     __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
 959     // Clone bytes int->long as above.
 960     __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit
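          // E.g. for T_BYTE with value 0xAB: 0xAB -> 0xABAB -> 0xABABABAB -> 0xABABABABABABABAB.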
 961 
 962     Label L_check_fill_8_bytes;
 963     // Fill 32-byte chunks.
 964     __ subf_(count, temp, count);
 965     __ blt(CCR0, L_check_fill_8_bytes);
 966 
 967     Label L_fill_32_bytes_loop;
 968     __ align(32);
 969     __ bind(L_fill_32_bytes_loop);
 970 
 971     __ std(value, 0, to);
 972     __ std(value, 8, to);
 973     __ subf_(count, temp, count);           // Update count.
 974     __ std(value, 16, to);
 975     __ std(value, 24, to);
 976 
 977     __ addi(to, to, 32);
 978     __ bge(CCR0, L_fill_32_bytes_loop);
 979 
 980     __ bind(L_check_fill_8_bytes);
 981     __ add_(count, temp, count);
 982     __ beq(CCR0, L_exit);
 983     __ addic_(count, count, -(2 << shift));
 984     __ blt(CCR0, L_fill_4_bytes);
 985 
 986     //
 987     // Length is too short, just fill 8 bytes at a time.
 988     //
 989     Label L_fill_8_bytes_loop;
 990     __ bind(L_fill_8_bytes_loop);
 991     __ std(value, 0, to);
 992     __ addic_(count, count, -(2 << shift));
 993     __ addi(to, to, 8);
 994     __ bge(CCR0, L_fill_8_bytes_loop);
 995 
 996     // Fill trailing 4 bytes.
 997     __ bind(L_fill_4_bytes);
 998     __ andi_(temp, count, 1<<shift);
 999     __ beq(CCR0, L_fill_2_bytes);
1000 
1001     __ stw(value, 0, to);
1002     if (t == T_BYTE || t == T_SHORT) {
1003       __ addi(to, to, 4);
1004       // Fill trailing 2 bytes.
1005       __ bind(L_fill_2_bytes);
1006       __ andi_(temp, count, 1<<(shift-1));
1007       __ beq(CCR0, L_fill_byte);
1008       __ sth(value, 0, to);
1009       if (t == T_BYTE) {
1010         __ addi(to, to, 2);
1011         // Fill trailing byte.
1012         __ bind(L_fill_byte);
1013         __ andi_(count, count, 1);
1014         __ beq(CCR0, L_exit);
1015         __ stb(value, 0, to);
1016       } else {
1017         __ bind(L_fill_byte);
1018       }
1019     } else {
1020       __ bind(L_fill_2_bytes);
1021     }
1022     __ bind(L_exit);
1023     __ blr();
1024 
1025     // Handle copies less than 8 bytes. Int is handled elsewhere.
1026     if (t == T_BYTE) {
1027       __ bind(L_fill_elements);
1028       Label L_fill_2, L_fill_4;
1029       __ andi_(temp, count, 1);
1030       __ beq(CCR0, L_fill_2);
1031       __ stb(value, 0, to);
1032       __ addi(to, to, 1);
1033       __ bind(L_fill_2);
1034       __ andi_(temp, count, 2);
1035       __ beq(CCR0, L_fill_4);
1036       __ stb(value, 0, to);
 1037       __ stb(value, 1, to);
1038       __ addi(to, to, 2);
1039       __ bind(L_fill_4);
1040       __ andi_(temp, count, 4);
1041       __ beq(CCR0, L_exit);
1042       __ stb(value, 0, to);
1043       __ stb(value, 1, to);
1044       __ stb(value, 2, to);
1045       __ stb(value, 3, to);
1046       __ blr();
1047     }
1048 
1049     if (t == T_SHORT) {
1050       Label L_fill_2;
1051       __ bind(L_fill_elements);
1052       __ andi_(temp, count, 1);
1053       __ beq(CCR0, L_fill_2);
1054       __ sth(value, 0, to);
1055       __ addi(to, to, 2);
1056       __ bind(L_fill_2);
1057       __ andi_(temp, count, 2);
1058       __ beq(CCR0, L_exit);
1059       __ sth(value, 0, to);
1060       __ sth(value, 2, to);
1061       __ blr();
1062     }
1063     return start;
1064   }
1065 
1066   inline void assert_positive_int(Register count) {
1067 #ifdef ASSERT
1068     __ srdi_(R0, count, 31);
1069     __ asm_assert_eq("missing zero extend", 0xAFFE);
1070 #endif
1071   }
1072 
1073   // Generate overlap test for array copy stubs.
1074   //
1075   // Input:
1076   //   R3_ARG1    -  from
1077   //   R4_ARG2    -  to
1078   //   R5_ARG3    -  element count
1079   //
1080   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
1081     Register tmp1 = R6_ARG4;
1082     Register tmp2 = R7_ARG5;
1083 
1084     assert_positive_int(R5_ARG3);
1085 
1086     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
1087     __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
1088     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
1089     __ cmpld(CCR1, tmp1, tmp2);
1090     __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
1091     // Overlaps if Src before dst and distance smaller than size.
1092     // Branch to forward copy routine otherwise (within range of 32kB).
1093     __ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target);
1094 
1095     // need to copy backwards
1096   }
1097 
1098   // The guideline in the implementations of generate_disjoint_xxx_copy
1099   // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
1100   // single instructions, but to avoid alignment interrupts (see subsequent
 1101   // comment). Furthermore, we try to minimize misaligned accesses, even
1102   // though they cause no alignment interrupt.
1103   //
1104   // In Big-Endian mode, the PowerPC architecture requires implementations to
1105   // handle automatically misaligned integer halfword and word accesses,
1106   // word-aligned integer doubleword accesses, and word-aligned floating-point
1107   // accesses. Other accesses may or may not generate an Alignment interrupt
1108   // depending on the implementation.
1109   // Alignment interrupt handling may require on the order of hundreds of cycles,
1110   // so every effort should be made to avoid misaligned memory values.
1111   //
1112   //
1113   // Generate stub for disjoint byte copy.  If "aligned" is true, the
1114   // "from" and "to" addresses are assumed to be heapword aligned.
1115   //
1116   // Arguments for generated stub:
1117   //      from:  R3_ARG1
1118   //      to:    R4_ARG2
1119   //      count: R5_ARG3 treated as signed
1120   //
1121   address generate_disjoint_byte_copy(bool aligned, const char * name) {
1122     StubCodeMark mark(this, "StubRoutines", name);
1123     address start = __ function_entry();
1124     assert_positive_int(R5_ARG3);
1125 
1126     Register tmp1 = R6_ARG4;
1127     Register tmp2 = R7_ARG5;
1128     Register tmp3 = R8_ARG6;
1129     Register tmp4 = R9_ARG7;
1130 
1131     VectorSRegister tmp_vsr1  = VSR1;
1132     VectorSRegister tmp_vsr2  = VSR2;
1133 
1134     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10;
1135 
1136     // Don't try anything fancy if arrays don't have many elements.
1137     __ li(tmp3, 0);
1138     __ cmpwi(CCR0, R5_ARG3, 17);
1139     __ ble(CCR0, l_6); // copy 4 at a time
1140 
1141     if (!aligned) {
1142       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1143       __ andi_(tmp1, tmp1, 3);
1144       __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
1145 
1146       // Copy elements if necessary to align to 4 bytes.
1147       __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
1148       __ andi_(tmp1, tmp1, 3);
1149       __ beq(CCR0, l_2);
1150 
1151       __ subf(R5_ARG3, tmp1, R5_ARG3);
1152       __ bind(l_9);
1153       __ lbz(tmp2, 0, R3_ARG1);
1154       __ addic_(tmp1, tmp1, -1);
1155       __ stb(tmp2, 0, R4_ARG2);
1156       __ addi(R3_ARG1, R3_ARG1, 1);
1157       __ addi(R4_ARG2, R4_ARG2, 1);
1158       __ bne(CCR0, l_9);
1159 
1160       __ bind(l_2);
1161     }
1162 
1163     // copy 8 elements at a time
1164     __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
1165     __ andi_(tmp1, tmp2, 7);
1166     __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8
1167 
1168     // copy a 2-element word if necessary to align to 8 bytes
1169     __ andi_(R0, R3_ARG1, 7);
1170     __ beq(CCR0, l_7);
1171 
1172     __ lwzx(tmp2, R3_ARG1, tmp3);
1173     __ addi(R5_ARG3, R5_ARG3, -4);
1174     __ stwx(tmp2, R4_ARG2, tmp3);
1175     { // FasterArrayCopy
1176       __ addi(R3_ARG1, R3_ARG1, 4);
1177       __ addi(R4_ARG2, R4_ARG2, 4);
1178     }
1179     __ bind(l_7);
1180 
1181     { // FasterArrayCopy
1182       __ cmpwi(CCR0, R5_ARG3, 31);
1183       __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain
1184 
1185       __ srdi(tmp1, R5_ARG3, 5);
1186       __ andi_(R5_ARG3, R5_ARG3, 31);
1187       __ mtctr(tmp1);
1188 
1189      if (!VM_Version::has_vsx()) {
1190 
1191       __ bind(l_8);
1192       // Use unrolled version for mass copying (copy 32 elements a time)
1193       // Load feeding store gets zero latency on Power6, however not on Power5.
1194       // Therefore, the following sequence is made for the good of both.
1195       __ ld(tmp1, 0, R3_ARG1);
1196       __ ld(tmp2, 8, R3_ARG1);
1197       __ ld(tmp3, 16, R3_ARG1);
1198       __ ld(tmp4, 24, R3_ARG1);
1199       __ std(tmp1, 0, R4_ARG2);
1200       __ std(tmp2, 8, R4_ARG2);
1201       __ std(tmp3, 16, R4_ARG2);
1202       __ std(tmp4, 24, R4_ARG2);
1203       __ addi(R3_ARG1, R3_ARG1, 32);
1204       __ addi(R4_ARG2, R4_ARG2, 32);
1205       __ bdnz(l_8);
1206 
1207     } else { // Processor supports VSX, so use it to mass copy.
1208 
1209       // Prefetch the data into the L2 cache.
1210       __ dcbt(R3_ARG1, 0);
1211 
1212       // If supported set DSCR pre-fetch to deepest.
1213       if (VM_Version::has_mfdscr()) {
1214         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1215         __ mtdscr(tmp2);
1216       }
1217 
1218       __ li(tmp1, 16);
1219 
 1220       // Backbranch target aligned to 32 bytes. Not aligned to 16 bytes, as the
1221       // loop contains < 8 instructions that fit inside a single
1222       // i-cache sector.
1223       __ align(32);
1224 
1225       __ bind(l_10);
1226       // Use loop with VSX load/store instructions to
1227       // copy 32 elements a time.
1228       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1229       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1230       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1231       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1232       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
 1233       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1234       __ bdnz(l_10);                       // Dec CTR and loop if not zero.
1235 
1236       // Restore DSCR pre-fetch value.
1237       if (VM_Version::has_mfdscr()) {
1238         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1239         __ mtdscr(tmp2);
1240       }
1241 
1242     } // VSX
1243    } // FasterArrayCopy
1244 
1245     __ bind(l_6);
1246 
1247     // copy 4 elements at a time
1248     __ cmpwi(CCR0, R5_ARG3, 4);
1249     __ blt(CCR0, l_1);
1250     __ srdi(tmp1, R5_ARG3, 2);
1251     __ mtctr(tmp1); // is > 0
1252     __ andi_(R5_ARG3, R5_ARG3, 3);
1253 
1254     { // FasterArrayCopy
1255       __ addi(R3_ARG1, R3_ARG1, -4);
1256       __ addi(R4_ARG2, R4_ARG2, -4);
1257       __ bind(l_3);
1258       __ lwzu(tmp2, 4, R3_ARG1);
1259       __ stwu(tmp2, 4, R4_ARG2);
1260       __ bdnz(l_3);
1261       __ addi(R3_ARG1, R3_ARG1, 4);
1262       __ addi(R4_ARG2, R4_ARG2, 4);
1263     }
1264 
1265     // do single element copy
1266     __ bind(l_1);
1267     __ cmpwi(CCR0, R5_ARG3, 0);
1268     __ beq(CCR0, l_4);
1269 
1270     { // FasterArrayCopy
1271       __ mtctr(R5_ARG3);
1272       __ addi(R3_ARG1, R3_ARG1, -1);
1273       __ addi(R4_ARG2, R4_ARG2, -1);
1274 
1275       __ bind(l_5);
1276       __ lbzu(tmp2, 1, R3_ARG1);
1277       __ stbu(tmp2, 1, R4_ARG2);
1278       __ bdnz(l_5);
1279     }
1280 
1281     __ bind(l_4);
1282     __ li(R3_RET, 0); // return 0
1283     __ blr();
1284 
1285     return start;
1286   }
1287 
1288   // Generate stub for conjoint byte copy.  If "aligned" is true, the
1289   // "from" and "to" addresses are assumed to be heapword aligned.
1290   //
1291   // Arguments for generated stub:
1292   //      from:  R3_ARG1
1293   //      to:    R4_ARG2
1294   //      count: R5_ARG3 treated as signed
1295   //
1296   address generate_conjoint_byte_copy(bool aligned, const char * name) {
1297     StubCodeMark mark(this, "StubRoutines", name);
1298     address start = __ function_entry();
1299     assert_positive_int(R5_ARG3);
1300 
1301     Register tmp1 = R6_ARG4;
1302     Register tmp2 = R7_ARG5;
1303     Register tmp3 = R8_ARG6;
1304 
1305     address nooverlap_target = aligned ?
1306       STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
1307       STUB_ENTRY(jbyte_disjoint_arraycopy);
1308 
1309     array_overlap_test(nooverlap_target, 0);
1310     // Do reverse copy. We assume the case of actual overlap is rare enough
1311     // that we don't have to optimize it.
1312     Label l_1, l_2;
1313 
1314     __ b(l_2);
1315     __ bind(l_1);
1316     __ stbx(tmp1, R4_ARG2, R5_ARG3);
1317     __ bind(l_2);
1318     __ addic_(R5_ARG3, R5_ARG3, -1);
1319     __ lbzx(tmp1, R3_ARG1, R5_ARG3);
1320     __ bge(CCR0, l_1);
1321 
1322     __ li(R3_RET, 0); // return 0
1323     __ blr();
1324 
1325     return start;
1326   }
1327 
1328   // Generate stub for disjoint short copy.  If "aligned" is true, the
1329   // "from" and "to" addresses are assumed to be heapword aligned.
1330   //
1331   // Arguments for generated stub:
1332   //      from:  R3_ARG1
1333   //      to:    R4_ARG2
1334   //  elm.count: R5_ARG3 treated as signed
1335   //
1336   // Strategy for aligned==true:
1337   //
1338   //  If length <= 9:
1339   //     1. copy 2 elements at a time (l_6)
1340   //     2. copy last element if original element count was odd (l_1)
1341   //
1342   //  If length > 9:
1343   //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
1344   //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
1345   //     3. copy last element if one was left in step 2. (l_1)
1346   //
1347   //
1348   // Strategy for aligned==false:
1349   //
1350   //  If length <= 9: same as aligned==true case, but NOTE: load/stores
1351   //                  can be unaligned (see comment below)
1352   //
1353   //  If length > 9:
1354   //     1. continue with step 6. if the alignment of from and to mod 4
1355   //        is different.
1356   //     2. align from and to to 4 bytes by copying 1 element if necessary
1357   //     3. at l_2 from and to are 4 byte aligned; continue with
1358   //        5. if they cannot be aligned to 8 bytes because they have
1359   //        got different alignment mod 8.
1360   //     4. at this point we know that both, from and to, have the same
1361   //        alignment mod 8, now copy one element if necessary to get
1362   //        8 byte alignment of from and to.
1363   //     5. copy 4 elements at a time until less than 4 elements are
1364   //        left; depending on step 3. all load/stores are aligned or
1365   //        either all loads or all stores are unaligned.
1366   //     6. copy 2 elements at a time until less than 2 elements are
1367   //        left (l_6); arriving here from step 1., there is a chance
1368   //        that all accesses are unaligned.
1369   //     7. copy last element if one was left in step 6. (l_1)
1370   //
1371   //  There are unaligned data accesses using integer load/store
1372   //  instructions in this stub. POWER allows such accesses.
1373   //
1374   //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
1375   //  Chapter 2: Effect of Operand Placement on Performance) unaligned
1376   //  integer load/stores have good performance. Only unaligned
1377   //  floating point load/stores can have poor performance.
1378   //
1379   //  TODO:
1380   //
1381   //  1. check if aligning the backbranch target of loops is beneficial
1382   //
1383   address generate_disjoint_short_copy(bool aligned, const char * name) {
1384     StubCodeMark mark(this, "StubRoutines", name);
1385 
1386     Register tmp1 = R6_ARG4;
1387     Register tmp2 = R7_ARG5;
1388     Register tmp3 = R8_ARG6;
1389     Register tmp4 = R9_ARG7;
1390 
1391     VectorSRegister tmp_vsr1  = VSR1;
1392     VectorSRegister tmp_vsr2  = VSR2;
1393 
1394     address start = __ function_entry();
1395     assert_positive_int(R5_ARG3);
1396 
1397     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
1398 
1399     // don't try anything fancy if arrays don't have many elements
1400     __ li(tmp3, 0);
1401     __ cmpwi(CCR0, R5_ARG3, 9);
1402     __ ble(CCR0, l_6); // copy 2 at a time
1403 
1404     if (!aligned) {
1405       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1406       __ andi_(tmp1, tmp1, 3);
1407       __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
1408 
1409       // At this point it is guaranteed that both, from and to have the same alignment mod 4.
1410 
1411       // Copy 1 element if necessary to align to 4 bytes.
1412       __ andi_(tmp1, R3_ARG1, 3);
1413       __ beq(CCR0, l_2);
1414 
1415       __ lhz(tmp2, 0, R3_ARG1);
1416       __ addi(R3_ARG1, R3_ARG1, 2);
1417       __ sth(tmp2, 0, R4_ARG2);
1418       __ addi(R4_ARG2, R4_ARG2, 2);
1419       __ addi(R5_ARG3, R5_ARG3, -1);
1420       __ bind(l_2);
1421 
1422       // At this point both from and to are at least 4 byte aligned.
1423 
1424       // Copy 4 elements at a time.
1425       // Align to 8 bytes, but only if both from and to have the same alignment mod 8.
1426       __ xorr(tmp2, R3_ARG1, R4_ARG2);
1427       __ andi_(tmp1, tmp2, 7);
1428       __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
1429 
1430       // Copy a 2-element word if necessary to align to 8 bytes.
1431       __ andi_(R0, R3_ARG1, 7);
1432       __ beq(CCR0, l_7);
1433 
1434       __ lwzx(tmp2, R3_ARG1, tmp3);
1435       __ addi(R5_ARG3, R5_ARG3, -2);
1436       __ stwx(tmp2, R4_ARG2, tmp3);
1437       { // FasterArrayCopy
1438         __ addi(R3_ARG1, R3_ARG1, 4);
1439         __ addi(R4_ARG2, R4_ARG2, 4);
1440       }
1441     }
1442 
1443     __ bind(l_7);
1444 
1445   // Copy 16 elements at a time; either the loads or the stores can
1446   // be unaligned if aligned == false.
1447 
1448     { // FasterArrayCopy
1449       __ cmpwi(CCR0, R5_ARG3, 15);
1450       __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
1451 
1452       __ srdi(tmp1, R5_ARG3, 4);
1453       __ andi_(R5_ARG3, R5_ARG3, 15);
1454       __ mtctr(tmp1);
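           // Note: count / 16 goes into the count register (CTR); each bdnz below
           // decrements CTR and loops while it is non-zero. R5_ARG3 keeps
           // count % 16 for the tail copies.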
1455 
1456       if (!VM_Version::has_vsx()) {
1457 
1458         __ bind(l_8);
1459         // Use unrolled version for mass copying (copy 16 elements at a time).
1460         // Load feeding store gets zero latency on Power6, but not on Power5.
1461         // Therefore, the following sequence is made for the good of both.
1462         __ ld(tmp1, 0, R3_ARG1);
1463         __ ld(tmp2, 8, R3_ARG1);
1464         __ ld(tmp3, 16, R3_ARG1);
1465         __ ld(tmp4, 24, R3_ARG1);
1466         __ std(tmp1, 0, R4_ARG2);
1467         __ std(tmp2, 8, R4_ARG2);
1468         __ std(tmp3, 16, R4_ARG2);
1469         __ std(tmp4, 24, R4_ARG2);
1470         __ addi(R3_ARG1, R3_ARG1, 32);
1471         __ addi(R4_ARG2, R4_ARG2, 32);
1472         __ bdnz(l_8);
1473 
1474       } else { // Processor supports VSX, so use it to mass copy.
1475 
1476         // Prefetch src data into L2 cache.
1477         __ dcbt(R3_ARG1, 0);
1478 
1479         // If supported set DSCR pre-fetch to deepest.
1480         if (VM_Version::has_mfdscr()) {
1481           __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1482           __ mtdscr(tmp2);
1483         }
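             // Note: DSCR is the Data Stream Control Register. OR-ing 7 into the
             // saved default value is meant to request the deepest hardware
             // prefetch depth (assuming the depth field occupies the low-order
             // bits, as the use of VM_Version::_dscr_val here implies).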
1484         __ li(tmp1, 16);
1485 
1486         // Backbranch target aligned to 32 bytes rather than 16, so that the
1487         // loop, which contains < 8 instructions, fits inside a single
1488         // i-cache sector.
1489         __ align(32);
1490 
1491         __ bind(l_9);
1492         // Use loop with VSX load/store instructions to
1493         // copy 16 elements at a time.
1494         __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load from src.
1495         __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst.
1496         __ lxvd2x(tmp_vsr2, R3_ARG1, tmp1);  // Load from src + 16.
1497         __ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
1498         __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32.
1499         __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32.
1500         __ bdnz(l_9);                        // Dec CTR and loop if not zero.
1501 
1502         // Restore DSCR pre-fetch value.
1503         if (VM_Version::has_mfdscr()) {
1504           __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1505           __ mtdscr(tmp2);
1506         }
1507 
1508       }
1509     } // FasterArrayCopy
1510     __ bind(l_6);
1511 
1512     // copy 2 elements at a time
1513     { // FasterArrayCopy
1514       __ cmpwi(CCR0, R5_ARG3, 2);
1515       __ blt(CCR0, l_1);
1516       __ srdi(tmp1, R5_ARG3, 1);
1517       __ andi_(R5_ARG3, R5_ARG3, 1);
1518 
1519       __ addi(R3_ARG1, R3_ARG1, -4);
1520       __ addi(R4_ARG2, R4_ARG2, -4);
1521       __ mtctr(tmp1);
1522 
1523       __ bind(l_3);
1524       __ lwzu(tmp2, 4, R3_ARG1);
1525       __ stwu(tmp2, 4, R4_ARG2);
1526       __ bdnz(l_3);
1527 
1528       __ addi(R3_ARG1, R3_ARG1, 4);
1529       __ addi(R4_ARG2, R4_ARG2, 4);
1530     }
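         // Note: the loops above and below use update-form accesses (lwzu/stwu,
         // lhzu/sthu); the pointers are pre-decremented by one element so each
         // access also advances its pointer by the element size.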
1531 
1532     // do single element copy
1533     __ bind(l_1);
1534     __ cmpwi(CCR0, R5_ARG3, 0);
1535     __ beq(CCR0, l_4);
1536 
1537     { // FasterArrayCopy
1538       __ mtctr(R5_ARG3);
1539       __ addi(R3_ARG1, R3_ARG1, -2);
1540       __ addi(R4_ARG2, R4_ARG2, -2);
1541 
1542       __ bind(l_5);
1543       __ lhzu(tmp2, 2, R3_ARG1);
1544       __ sthu(tmp2, 2, R4_ARG2);
1545       __ bdnz(l_5);
1546     }
1547     __ bind(l_4);
1548     __ li(R3_RET, 0); // return 0
1549     __ blr();
1550 
1551     return start;
1552   }
1553 
1554   // Generate stub for conjoint short copy.  If "aligned" is true, the
1555   // "from" and "to" addresses are assumed to be heapword aligned.
1556   //
1557   // Arguments for generated stub:
1558   //      from:  R3_ARG1
1559   //      to:    R4_ARG2
1560   //      count: R5_ARG3 treated as signed
1561   //
1562   address generate_conjoint_short_copy(bool aligned, const char * name) {
1563     StubCodeMark mark(this, "StubRoutines", name);
1564     address start = __ function_entry();
1565     assert_positive_int(R5_ARG3);
1566 
1567     Register tmp1 = R6_ARG4;
1568     Register tmp2 = R7_ARG5;
1569     Register tmp3 = R8_ARG6;
1570 
1571     address nooverlap_target = aligned ?
1572       STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
1573       STUB_ENTRY(jshort_disjoint_arraycopy);
1574 
1575     array_overlap_test(nooverlap_target, 1);
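         // The second argument of array_overlap_test is the log2 element size
         // (1 for jshort); if the regions do not overlap harmfully, control
         // transfers to the disjoint copy stub selected above.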
1576 
1577     Label l_1, l_2;
1578     __ sldi(tmp1, R5_ARG3, 1);
1579     __ b(l_2);
1580     __ bind(l_1);
1581     __ sthx(tmp2, R4_ARG2, tmp1);
1582     __ bind(l_2);
1583     __ addic_(tmp1, tmp1, -2);
1584     __ lhzx(tmp2, R3_ARG1, tmp1);
1585     __ bge(CCR0, l_1);
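         // Simple backward copy: tmp1 starts at count*2 bytes and is decremented
         // by 2 per iteration, so elements are moved from the last toward the
         // first; this is what makes an overlapping destination above the source
         // safe.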
1586 
1587     __ li(R3_RET, 0); // return 0
1588     __ blr();
1589 
1590     return start;
1591   }
1592 
1593   // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
1594   // is true, the "from" and "to" addresses are assumed to be heapword aligned.
1595   //
1596   // Arguments:
1597   //      from:  R3_ARG1
1598   //      to:    R4_ARG2
1599   //      count: R5_ARG3 treated as signed
1600   //
1601   void generate_disjoint_int_copy_core(bool aligned) {
1602     Register tmp1 = R6_ARG4;
1603     Register tmp2 = R7_ARG5;
1604     Register tmp3 = R8_ARG6;
1605     Register tmp4 = R0;
1606 
1607     VectorSRegister tmp_vsr1  = VSR1;
1608     VectorSRegister tmp_vsr2  = VSR2;
1609 
1610     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
1611 
1612     // for short arrays, just do single element copy
1613     __ li(tmp3, 0);
1614     __ cmpwi(CCR0, R5_ARG3, 5);
1615     __ ble(CCR0, l_2);
1616 
1617     if (!aligned) {
1618         // check if arrays have same alignment mod 8.
1619         __ xorr(tmp1, R3_ARG1, R4_ARG2);
1620         __ andi_(R0, tmp1, 7);
1621         // Not the same alignment, but ld and std just need to be 4 byte aligned.
1622         __ bne(CCR0, l_4); // different alignment mod 8 -> skip the 8-byte alignment step; ld/std only need 4-byte alignment
1623 
1624         // copy 1 element to align to and from on an 8 byte boundary
1625         __ andi_(R0, R3_ARG1, 7);
1626         __ beq(CCR0, l_4);
1627 
1628         __ lwzx(tmp2, R3_ARG1, tmp3);
1629         __ addi(R5_ARG3, R5_ARG3, -1);
1630         __ stwx(tmp2, R4_ARG2, tmp3);
1631         { // FasterArrayCopy
1632           __ addi(R3_ARG1, R3_ARG1, 4);
1633           __ addi(R4_ARG2, R4_ARG2, 4);
1634         }
1635         __ bind(l_4);
1636       }
1637 
1638     { // FasterArrayCopy
1639       __ cmpwi(CCR0, R5_ARG3, 7);
1640       __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1641 
1642       __ srdi(tmp1, R5_ARG3, 3);
1643       __ andi_(R5_ARG3, R5_ARG3, 7);
1644       __ mtctr(tmp1);
1645 
1646      if (!VM_Version::has_vsx()) {
1647 
1648       __ bind(l_6);
1649       // Use unrolled version for mass copying (copy 8 elements at a time).
1650       // Load feeding store gets zero latency on Power6, but not on Power5.
1651       // Therefore, the following sequence is made for the good of both.
1652       __ ld(tmp1, 0, R3_ARG1);
1653       __ ld(tmp2, 8, R3_ARG1);
1654       __ ld(tmp3, 16, R3_ARG1);
1655       __ ld(tmp4, 24, R3_ARG1);
1656       __ std(tmp1, 0, R4_ARG2);
1657       __ std(tmp2, 8, R4_ARG2);
1658       __ std(tmp3, 16, R4_ARG2);
1659       __ std(tmp4, 24, R4_ARG2);
1660       __ addi(R3_ARG1, R3_ARG1, 32);
1661       __ addi(R4_ARG2, R4_ARG2, 32);
1662       __ bdnz(l_6);
1663 
1664     } else { // Processor supports VSX, so use it to mass copy.
1665 
1666       // Prefetch the data into the L2 cache.
1667       __ dcbt(R3_ARG1, 0);
1668 
1669       // If supported set DSCR pre-fetch to deepest.
1670       if (VM_Version::has_mfdscr()) {
1671         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1672         __ mtdscr(tmp2);
1673       }
1674 
1675       __ li(tmp1, 16);
1676 
1677       // Backbranch target aligned to 32 bytes rather than 16, so that the
1678       // loop, which contains < 8 instructions, fits inside a single
1679       // i-cache sector.
1680       __ align(32);
1681 
1682       __ bind(l_7);
1683       // Use loop with VSX load/store instructions to
1684       // copy 8 elements at a time.
1685       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1686       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1687       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1688       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1689       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1690       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1691       __ bdnz(l_7);                        // Dec CTR and loop if not zero.
1692 
1693       // Restore DSCR pre-fetch value.
1694       if (VM_Version::has_mfdscr()) {
1695         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1696         __ mtdscr(tmp2);
1697       }
1698 
1699     } // VSX
1700    } // FasterArrayCopy
1701 
1702     // copy 1 element at a time
1703     __ bind(l_2);
1704     __ cmpwi(CCR0, R5_ARG3, 0);
1705     __ beq(CCR0, l_1);
1706 
1707     { // FasterArrayCopy
1708       __ mtctr(R5_ARG3);
1709       __ addi(R3_ARG1, R3_ARG1, -4);
1710       __ addi(R4_ARG2, R4_ARG2, -4);
1711 
1712       __ bind(l_3);
1713       __ lwzu(tmp2, 4, R3_ARG1);
1714       __ stwu(tmp2, 4, R4_ARG2);
1715       __ bdnz(l_3);
1716     }
1717 
1718     __ bind(l_1);
1719     return;
1720   }
1721 
1722   // Generate stub for disjoint int copy.  If "aligned" is true, the
1723   // "from" and "to" addresses are assumed to be heapword aligned.
1724   //
1725   // Arguments for generated stub:
1726   //      from:  R3_ARG1
1727   //      to:    R4_ARG2
1728   //      count: R5_ARG3 treated as signed
1729   //
1730   address generate_disjoint_int_copy(bool aligned, const char * name) {
1731     StubCodeMark mark(this, "StubRoutines", name);
1732     address start = __ function_entry();
1733     assert_positive_int(R5_ARG3);
1734     generate_disjoint_int_copy_core(aligned);
1735     __ li(R3_RET, 0); // return 0
1736     __ blr();
1737     return start;
1738   }
1739 
1740   // Generate core code for conjoint int copy (and oop copy on
1741   // 32-bit).  If "aligned" is true, the "from" and "to" addresses
1742   // are assumed to be heapword aligned.
1743   //
1744   // Arguments:
1745   //      from:  R3_ARG1
1746   //      to:    R4_ARG2
1747   //      count: R5_ARG3 treated as signed
1748   //
1749   void generate_conjoint_int_copy_core(bool aligned) {
1750     // Do reverse copy.  We assume the case of actual overlap is rare enough
1751     // that we don't have to optimize it.
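         // For example, if to == from + 8 and count == 4 ints, the last two source
         // elements occupy the same addresses as the first two destination
         // elements; copying from the last element downward reads every source
         // word before it can be overwritten.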
1752 
1753     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
1754 
1755     Register tmp1 = R6_ARG4;
1756     Register tmp2 = R7_ARG5;
1757     Register tmp3 = R8_ARG6;
1758     Register tmp4 = R0;
1759 
1760     VectorSRegister tmp_vsr1  = VSR1;
1761     VectorSRegister tmp_vsr2  = VSR2;
1762 
1763     { // FasterArrayCopy
1764       __ cmpwi(CCR0, R5_ARG3, 0);
1765       __ beq(CCR0, l_6);
1766 
1767       __ sldi(R5_ARG3, R5_ARG3, 2);
1768       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1769       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1770       __ srdi(R5_ARG3, R5_ARG3, 2);
1771 
1772       if (!aligned) {
1773         // check if arrays have same alignment mod 8.
1774         __ xorr(tmp1, R3_ARG1, R4_ARG2);
1775         __ andi_(R0, tmp1, 7);
1776         // Not the same alignment, but ld and std just need to be 4 byte aligned.
1777         __ bne(CCR0, l_7); // different alignment mod 8 -> skip the 8-byte alignment step; ld/std only need 4-byte alignment
1778 
1779         // copy 1 element to align to and from on an 8 byte boundary
1780         __ andi_(R0, R3_ARG1, 7);
1781         __ beq(CCR0, l_7);
1782 
1783         __ addi(R3_ARG1, R3_ARG1, -4);
1784         __ addi(R4_ARG2, R4_ARG2, -4);
1785         __ addi(R5_ARG3, R5_ARG3, -1);
1786         __ lwzx(tmp2, R3_ARG1);
1787         __ stwx(tmp2, R4_ARG2);
1788         __ bind(l_7);
1789       }
1790 
1791       __ cmpwi(CCR0, R5_ARG3, 7);
1792       __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1793 
1794       __ srdi(tmp1, R5_ARG3, 3);
1795       __ andi(R5_ARG3, R5_ARG3, 7);
1796       __ mtctr(tmp1);
1797 
1798      if (!VM_Version::has_vsx()) {
1799       __ bind(l_4);
1800       // Use unrolled version for mass copying (copy 8 elements at a time).
1801       // Load feeding store gets zero latency on Power6, but not on Power5.
1802       // Therefore, the following sequence is made for the good of both.
1803       __ addi(R3_ARG1, R3_ARG1, -32);
1804       __ addi(R4_ARG2, R4_ARG2, -32);
1805       __ ld(tmp4, 24, R3_ARG1);
1806       __ ld(tmp3, 16, R3_ARG1);
1807       __ ld(tmp2, 8, R3_ARG1);
1808       __ ld(tmp1, 0, R3_ARG1);
1809       __ std(tmp4, 24, R4_ARG2);
1810       __ std(tmp3, 16, R4_ARG2);
1811       __ std(tmp2, 8, R4_ARG2);
1812       __ std(tmp1, 0, R4_ARG2);
1813       __ bdnz(l_4);
1814      } else {  // Processor supports VSX, so use it to mass copy.
1815       // Prefetch the data into the L2 cache.
1816       __ dcbt(R3_ARG1, 0);
1817 
1818       // If supported set DSCR pre-fetch to deepest.
1819       if (VM_Version::has_mfdscr()) {
1820         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1821         __ mtdscr(tmp2);
1822       }
1823 
1824       __ li(tmp1, 16);
1825 
1826       // Backbranch target aligned to 32 bytes rather than 16, so that the
1827       // loop, which contains < 8 instructions, fits inside a single
1828       // i-cache sector.
1829       __ align(32);
1830 
1831       __ bind(l_4);
1832       // Use loop with VSX load/store instructions to
1833       // copy 8 elements at a time.
1834       __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
1835       __ addi(R4_ARG2, R4_ARG2, -32);      // Update dst-=32
1836       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
1837       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1838       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
1839       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1840       __ bdnz(l_4);
1841 
1842       // Restore DSCR pre-fetch value.
1843       if (VM_Version::has_mfdscr()) {
1844         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1845         __ mtdscr(tmp2);
1846       }
1847      }
1848 
1849       __ cmpwi(CCR0, R5_ARG3, 0);
1850       __ beq(CCR0, l_6);
1851 
1852       __ bind(l_5);
1853       __ mtctr(R5_ARG3);
1854       __ bind(l_3);
1855       __ lwz(R0, -4, R3_ARG1);
1856       __ stw(R0, -4, R4_ARG2);
1857       __ addi(R3_ARG1, R3_ARG1, -4);
1858       __ addi(R4_ARG2, R4_ARG2, -4);
1859       __ bdnz(l_3);
1860 
1861       __ bind(l_6);
1862     }
1863   }
1864 
1865   // Generate stub for conjoint int copy.  If "aligned" is true, the
1866   // "from" and "to" addresses are assumed to be heapword aligned.
1867   //
1868   // Arguments for generated stub:
1869   //      from:  R3_ARG1
1870   //      to:    R4_ARG2
1871   //      count: R5_ARG3 treated as signed
1872   //
1873   address generate_conjoint_int_copy(bool aligned, const char * name) {
1874     StubCodeMark mark(this, "StubRoutines", name);
1875     address start = __ function_entry();
1876     assert_positive_int(R5_ARG3);
1877     address nooverlap_target = aligned ?
1878       STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
1879       STUB_ENTRY(jint_disjoint_arraycopy);
1880 
1881     array_overlap_test(nooverlap_target, 2);
1882 
1883     generate_conjoint_int_copy_core(aligned);
1884 
1885     __ li(R3_RET, 0); // return 0
1886     __ blr();
1887 
1888     return start;
1889   }
1890 
1891   // Generate core code for disjoint long copy (and oop copy on
1892   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1893   // are assumed to be heapword aligned.
1894   //
1895   // Arguments:
1896   //      from:  R3_ARG1
1897   //      to:    R4_ARG2
1898   //      count: R5_ARG3 treated as signed
1899   //
1900   void generate_disjoint_long_copy_core(bool aligned) {
1901     Register tmp1 = R6_ARG4;
1902     Register tmp2 = R7_ARG5;
1903     Register tmp3 = R8_ARG6;
1904     Register tmp4 = R0;
1905 
1906     Label l_1, l_2, l_3, l_4, l_5;
1907 
1908     VectorSRegister tmp_vsr1  = VSR1;
1909     VectorSRegister tmp_vsr2  = VSR2;
1910 
1911     { // FasterArrayCopy
1912       __ cmpwi(CCR0, R5_ARG3, 3);
1913       __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1914 
1915       __ srdi(tmp1, R5_ARG3, 2);
1916       __ andi_(R5_ARG3, R5_ARG3, 3);
1917       __ mtctr(tmp1);
1918 
1919     if (!VM_Version::has_vsx()) {
1920       __ bind(l_4);
1921       // Use unrolled version for mass copying (copy 4 elements at a time).
1922       // Load feeding store gets zero latency on Power6, but not on Power5.
1923       // Therefore, the following sequence is made for the good of both.
1924       __ ld(tmp1, 0, R3_ARG1);
1925       __ ld(tmp2, 8, R3_ARG1);
1926       __ ld(tmp3, 16, R3_ARG1);
1927       __ ld(tmp4, 24, R3_ARG1);
1928       __ std(tmp1, 0, R4_ARG2);
1929       __ std(tmp2, 8, R4_ARG2);
1930       __ std(tmp3, 16, R4_ARG2);
1931       __ std(tmp4, 24, R4_ARG2);
1932       __ addi(R3_ARG1, R3_ARG1, 32);
1933       __ addi(R4_ARG2, R4_ARG2, 32);
1934       __ bdnz(l_4);
1935 
1936     } else { // Processor supports VSX, so use it to mass copy.
1937 
1938       // Prefetch the data into the L2 cache.
1939       __ dcbt(R3_ARG1, 0);
1940 
1941       // If supported set DSCR pre-fetch to deepest.
1942       if (VM_Version::has_mfdscr()) {
1943         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1944         __ mtdscr(tmp2);
1945       }
1946 
1947       __ li(tmp1, 16);
1948 
1949       // Backbranch target aligned to 32 bytes rather than 16, so that the
1950       // loop, which contains < 8 instructions, fits inside a single
1951       // i-cache sector.
1952       __ align(32);
1953 
1954       __ bind(l_5);
1955       // Use loop with VSX load/store instructions to
1956       // copy 4 elements at a time.
1957       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1958       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1959       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1960       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1961       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1962       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1963       __ bdnz(l_5);                        // Dec CTR and loop if not zero.
1964 
1965       // Restore DSCR pre-fetch value.
1966       if (VM_Version::has_mfdscr()) {
1967         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1968         __ mtdscr(tmp2);
1969       }
1970 
1971     } // VSX
1972    } // FasterArrayCopy
1973 
1974     // copy 1 element at a time
1975     __ bind(l_3);
1976     __ cmpwi(CCR0, R5_ARG3, 0);
1977     __ beq(CCR0, l_1);
1978 
1979     { // FasterArrayCopy
1980       __ mtctr(R5_ARG3);
1981       __ addi(R3_ARG1, R3_ARG1, -8);
1982       __ addi(R4_ARG2, R4_ARG2, -8);
1983 
1984       __ bind(l_2);
1985       __ ldu(R0, 8, R3_ARG1);
1986       __ stdu(R0, 8, R4_ARG2);
1987       __ bdnz(l_2);
1988 
1989     }
1990     __ bind(l_1);
1991   }
1992 
1993   // Generate stub for disjoint long copy.  If "aligned" is true, the
1994   // "from" and "to" addresses are assumed to be heapword aligned.
1995   //
1996   // Arguments for generated stub:
1997   //      from:  R3_ARG1
1998   //      to:    R4_ARG2
1999   //      count: R5_ARG3 treated as signed
2000   //
2001   address generate_disjoint_long_copy(bool aligned, const char * name) {
2002     StubCodeMark mark(this, "StubRoutines", name);
2003     address start = __ function_entry();
2004     assert_positive_int(R5_ARG3);
2005     generate_disjoint_long_copy_core(aligned);
2006     __ li(R3_RET, 0); // return 0
2007     __ blr();
2008 
2009     return start;
2010   }
2011 
2012   // Generate core code for conjoint long copy (and oop copy on
2013   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
2014   // are assumed to be heapword aligned.
2015   //
2016   // Arguments:
2017   //      from:  R3_ARG1
2018   //      to:    R4_ARG2
2019   //      count: R5_ARG3 treated as signed
2020   //
2021   void generate_conjoint_long_copy_core(bool aligned) {
2022     Register tmp1 = R6_ARG4;
2023     Register tmp2 = R7_ARG5;
2024     Register tmp3 = R8_ARG6;
2025     Register tmp4 = R0;
2026 
2027     VectorSRegister tmp_vsr1  = VSR1;
2028     VectorSRegister tmp_vsr2  = VSR2;
2029 
2030     Label l_1, l_2, l_3, l_4, l_5;
2031 
2032     __ cmpwi(CCR0, R5_ARG3, 0);
2033     __ beq(CCR0, l_1);
2034 
2035     { // FasterArrayCopy
2036       __ sldi(R5_ARG3, R5_ARG3, 3);
2037       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
2038       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
2039       __ srdi(R5_ARG3, R5_ARG3, 3);
2040 
2041       __ cmpwi(CCR0, R5_ARG3, 3);
2042       __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
2043 
2044       __ srdi(tmp1, R5_ARG3, 2);
2045       __ andi(R5_ARG3, R5_ARG3, 3);
2046       __ mtctr(tmp1);
2047 
2048      if (!VM_Version::has_vsx()) {
2049       __ bind(l_4);
2050       // Use unrolled version for mass copying (copy 4 elements at a time).
2051       // Load feeding store gets zero latency on Power6, but not on Power5.
2052       // Therefore, the following sequence is made for the good of both.
2053       __ addi(R3_ARG1, R3_ARG1, -32);
2054       __ addi(R4_ARG2, R4_ARG2, -32);
2055       __ ld(tmp4, 24, R3_ARG1);
2056       __ ld(tmp3, 16, R3_ARG1);
2057       __ ld(tmp2, 8, R3_ARG1);
2058       __ ld(tmp1, 0, R3_ARG1);
2059       __ std(tmp4, 24, R4_ARG2);
2060       __ std(tmp3, 16, R4_ARG2);
2061       __ std(tmp2, 8, R4_ARG2);
2062       __ std(tmp1, 0, R4_ARG2);
2063       __ bdnz(l_4);
2064      } else { // Processor supports VSX, so use it to mass copy.
2065       // Prefetch the data into the L2 cache.
2066       __ dcbt(R3_ARG1, 0);
2067 
2068       // If supported set DSCR pre-fetch to deepest.
2069       if (VM_Version::has_mfdscr()) {
2070         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
2071         __ mtdscr(tmp2);
2072       }
2073 
2074       __ li(tmp1, 16);
2075 
2076       // Backbranch target aligned to 32 bytes rather than 16, so that the
2077       // loop, which contains < 8 instructions, fits inside a single
2078       // i-cache sector.
2079       __ align(32);
2080 
2081       __ bind(l_4);
2082       // Use loop with VSX load/store instructions to
2083       // copy 4 elements at a time.
2084       __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
2085       __ addi(R4_ARG2, R4_ARG2, -32);      // Update dst-=32
2086       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
2087       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
2088       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
2089       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
2090       __ bdnz(l_4);
2091 
2092       // Restore DSCR pre-fetch value.
2093       if (VM_Version::has_mfdscr()) {
2094         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
2095         __ mtdscr(tmp2);
2096       }
2097      }
2098 
2099       __ cmpwi(CCR0, R5_ARG3, 0);
2100       __ beq(CCR0, l_1);
2101 
2102       __ bind(l_5);
2103       __ mtctr(R5_ARG3);
2104       __ bind(l_3);
2105       __ ld(R0, -8, R3_ARG1);
2106       __ std(R0, -8, R4_ARG2);
2107       __ addi(R3_ARG1, R3_ARG1, -8);
2108       __ addi(R4_ARG2, R4_ARG2, -8);
2109       __ bdnz(l_3);
2110 
2111     }
2112     __ bind(l_1);
2113   }
2114 
2115   // Generate stub for conjoint long copy.  If "aligned" is true, the
2116   // "from" and "to" addresses are assumed to be heapword aligned.
2117   //
2118   // Arguments for generated stub:
2119   //      from:  R3_ARG1
2120   //      to:    R4_ARG2
2121   //      count: R5_ARG3 treated as signed
2122   //
2123   address generate_conjoint_long_copy(bool aligned, const char * name) {
2124     StubCodeMark mark(this, "StubRoutines", name);
2125     address start = __ function_entry();
2126     assert_positive_int(R5_ARG3);
2127     address nooverlap_target = aligned ?
2128       STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) :
2129       STUB_ENTRY(jlong_disjoint_arraycopy);
2130 
2131     array_overlap_test(nooverlap_target, 3);
2132     generate_conjoint_long_copy_core(aligned);
2133 
2134     __ li(R3_RET, 0); // return 0
2135     __ blr();
2136 
2137     return start;
2138   }
2139 
2140   // Generate stub for conjoint oop copy.  If "aligned" is true, the
2141   // "from" and "to" addresses are assumed to be heapword aligned.
2142   //
2143   // Arguments for generated stub:
2144   //      from:  R3_ARG1
2145   //      to:    R4_ARG2
2146   //      count: R5_ARG3 treated as signed
2147   //      dest_uninitialized: G1 support
2148   //
2149   address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
2150     StubCodeMark mark(this, "StubRoutines", name);
2151 
2152     address start = __ function_entry();
2153     assert_positive_int(R5_ARG3);
2154     address nooverlap_target = aligned ?
2155       STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
2156       STUB_ENTRY(oop_disjoint_arraycopy);
2157 
2158     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
2159 
2160     // Save arguments.
2161     __ mr(R9_ARG7, R4_ARG2);
2162     __ mr(R10_ARG8, R5_ARG3);
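         // With compressed oops each element is a 32-bit narrowOop, so the int
         // copy core is reused (log2 element size 2); otherwise elements are full
         // 64-bit oops and the long copy core is used (log2 element size 3).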
2163 
2164     if (UseCompressedOops) {
2165       array_overlap_test(nooverlap_target, 2);
2166       generate_conjoint_int_copy_core(aligned);
2167     } else {
2168       array_overlap_test(nooverlap_target, 3);
2169       generate_conjoint_long_copy_core(aligned);
2170     }
2171 
2172     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
2173     __ li(R3_RET, 0); // return 0
2174     __ blr();
2175     return start;
2176   }
2177 
2178   // Generate stub for disjoint oop copy.  If "aligned" is true, the
2179   // "from" and "to" addresses are assumed to be heapword aligned.
2180   //
2181   // Arguments for generated stub:
2182   //      from:  R3_ARG1
2183   //      to:    R4_ARG2
2184   //      count: R5_ARG3 treated as signed
2185   //      dest_uninitialized: G1 support
2186   //
2187   address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
2188     StubCodeMark mark(this, "StubRoutines", name);
2189     address start = __ function_entry();
2190     assert_positive_int(R5_ARG3);
2191     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
2192 
2193     // save some arguments, disjoint_long_copy_core destroys them.
2194     // needed for post barrier
2195     __ mr(R9_ARG7, R4_ARG2);
2196     __ mr(R10_ARG8, R5_ARG3);
2197 
2198     if (UseCompressedOops) {
2199       generate_disjoint_int_copy_core(aligned);
2200     } else {
2201       generate_disjoint_long_copy_core(aligned);
2202     }
2203 
2204     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
2205     __ li(R3_RET, 0); // return 0
2206     __ blr();
2207 
2208     return start;
2209   }
2210 
2211 
2212   // Helper for generating a dynamic type check.
2213   // Smashes only the given temp registers.
2214   void generate_type_check(Register sub_klass,
2215                            Register super_check_offset,
2216                            Register super_klass,
2217                            Register temp,
2218                            Label& L_success) {
2219     assert_different_registers(sub_klass, super_check_offset, super_klass);
2220 
2221     BLOCK_COMMENT("type_check:");
2222 
2223     Label L_miss;
2224 
2225     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL,
2226                                      super_check_offset);
2227     __ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL);
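         // The fast path consults the cached super type at super_check_offset and
         // branches to L_success or L_miss when it can decide on its own;
         // otherwise the slow path performs the full secondary-supers scan.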
2228 
2229     // Fall through on failure!
2230     __ bind(L_miss);
2231   }
2232 
2233 
2234   //  Generate stub for checked oop copy.
2235   //
2236   // Arguments for generated stub:
2237   //      from:  R3
2238   //      to:    R4
2239   //      count: R5 treated as signed
2240   //      ckoff: R6 (super_check_offset)
2241   //      ckval: R7 (super_klass)
2242   //      ret:   R3 zero for success; (-1^K) where K is partial transfer count
2243   //
2244   address generate_checkcast_copy(const char *name, bool dest_uninitialized) {
2245 
2246     const Register R3_from   = R3_ARG1;      // source array address
2247     const Register R4_to     = R4_ARG2;      // destination array address
2248     const Register R5_count  = R5_ARG3;      // elements count
2249     const Register R6_ckoff  = R6_ARG4;      // super_check_offset
2250     const Register R7_ckval  = R7_ARG5;      // super_klass
2251 
2252     const Register R8_offset = R8_ARG6;      // loop var, with stride wordSize
2253     const Register R9_remain = R9_ARG7;      // loop var, with stride -1
2254     const Register R10_oop   = R10_ARG8;     // actual oop copied
2255     const Register R11_klass = R11_scratch1; // oop._klass
2256     const Register R12_tmp   = R12_scratch2;
2257 
2258     const Register R2_minus1 = R2;
2259 
2260     //__ align(CodeEntryAlignment);
2261     StubCodeMark mark(this, "StubRoutines", name);
2262     address start = __ function_entry();
2263 
2264     // Assert that int is 64 bit sign extended and arrays are not conjoint.
2265 #ifdef ASSERT
2266     {
2267     assert_positive_int(R5_ARG3);
2268     const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2;
2269     Label no_overlap;
2270     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
2271     __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes
2272     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
2273     __ cmpld(CCR1, tmp1, tmp2);
2274     __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
2275     // Overlaps if Src before dst and distance smaller than size.
2276     // Branch to forward copy routine otherwise.
2277     __ blt(CCR0, no_overlap);
2278     __ stop("overlap in checkcast_copy", 0x9543);
2279     __ bind(no_overlap);
2280     }
2281 #endif
2282 
2283     gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval);
2284 
2285     //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
2286 
2287     Label load_element, store_element, store_null, success, do_card_marks;
2288     __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
2289     __ li(R8_offset, 0);                   // Offset from start of arrays.
2290     __ li(R2_minus1, -1);
2291     __ bne(CCR0, load_element);
2292 
2293     // Empty array: Nothing to do.
2294     __ li(R3_RET, 0);           // Return 0 on (trivial) success.
2295     __ blr();
2296 
2297     // ======== begin loop ========
2298     // (Entry is load_element.)
2299     __ align(OptoLoopAlignment);
2300     __ bind(store_element);
2301     if (UseCompressedOops) {
2302       __ encode_heap_oop_not_null(R10_oop);
2303       __ bind(store_null);
2304       __ stw(R10_oop, R8_offset, R4_to);
2305     } else {
2306       __ bind(store_null);
2307       __ std(R10_oop, R8_offset, R4_to);
2308     }
2309 
2310     __ addi(R8_offset, R8_offset, heapOopSize);   // Step to next offset.
2311     __ add_(R9_remain, R2_minus1, R9_remain);     // Decrement the count.
2312     __ beq(CCR0, success);
2313 
2314     // ======== loop entry is here ========
2315     __ bind(load_element);
2316     __ load_heap_oop(R10_oop, R8_offset, R3_from, &store_null);  // Load the oop.
2317 
2318     __ load_klass(R11_klass, R10_oop); // Query the object klass.
2319 
2320     generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
2321                         // Branch to this on success:
2322                         store_element);
2323     // ======== end loop ========
2324 
2325     // It was a real error; we must depend on the caller to finish the job.
2326     // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
2327     // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
2328     // and report their number to the caller.
2329     __ subf_(R5_count, R9_remain, R5_count);
2330     __ nand(R3_RET, R5_count, R5_count);   // report (-1^K) to caller
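         // nand(x, x) computes ~x, and in two's complement ~K == -1 ^ K, matching
         // the documented return convention for a partial transfer of K elements.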
2331     __ bne(CCR0, do_card_marks);
2332     __ blr();
2333 
2334     __ bind(success);
2335     __ li(R3_RET, 0);
2336 
2337     __ bind(do_card_marks);
2338     // Store check on R4_to[0..R5_count-1].
2339     gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET);
2340     __ blr();
2341     return start;
2342   }
2343 
2344 
2345   //  Generate 'unsafe' array copy stub.
2346   //  Though just as safe as the other stubs, it takes an unscaled
2347   //  size_t argument instead of an element count.
2348   //
2349   // Arguments for generated stub:
2350   //      from:  R3
2351   //      to:    R4
2352   //      count: R5 byte count, treated as ssize_t, can be zero
2353   //
2354   // Examines the alignment of the operands and dispatches
2355   // to a long, int, short, or byte copy loop.
2356   //
2357   address generate_unsafe_copy(const char* name,
2358                                address byte_copy_entry,
2359                                address short_copy_entry,
2360                                address int_copy_entry,
2361                                address long_copy_entry) {
2362 
2363     const Register R3_from   = R3_ARG1;      // source array address
2364     const Register R4_to     = R4_ARG2;      // destination array address
2365     const Register R5_count  = R5_ARG3;      // byte count on entry (as long on PPC64); scaled to an element count below
2366 
2367     const Register R6_bits   = R6_ARG4;      // test copy of low bits
2368     const Register R7_tmp    = R7_ARG5;
2369 
2370     //__ align(CodeEntryAlignment);
2371     StubCodeMark mark(this, "StubRoutines", name);
2372     address start = __ function_entry();
2373 
2374     // Bump this on entry, not on exit:
2375     //inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp);
2376 
2377     Label short_copy, int_copy, long_copy;
2378 
2379     __ orr(R6_bits, R3_from, R4_to);
2380     __ orr(R6_bits, R6_bits, R5_count);
2381     __ andi_(R0, R6_bits, (BytesPerLong-1));
2382     __ beq(CCR0, long_copy);
2383 
2384     __ andi_(R0, R6_bits, (BytesPerInt-1));
2385     __ beq(CCR0, int_copy);
2386 
2387     __ andi_(R0, R6_bits, (BytesPerShort-1));
2388     __ beq(CCR0, short_copy);
2389 
2390     // byte_copy:
2391     __ b(byte_copy_entry);
2392 
2393     __ bind(short_copy);
2394     __ srwi(R5_count, R5_count, LogBytesPerShort);
2395     __ b(short_copy_entry);
2396 
2397     __ bind(int_copy);
2398     __ srwi(R5_count, R5_count, LogBytesPerInt);
2399     __ b(int_copy_entry);
2400 
2401     __ bind(long_copy);
2402     __ srwi(R5_count, R5_count, LogBytesPerLong);
2403     __ b(long_copy_entry);
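         // Example: if from, to and the byte count all have their low three bits
         // clear (say count == 64), the tests above select long_copy and
         // count >> 3 == 8 elements are copied; in general the lowest set bit
         // across from, to and count determines the widest usable element size.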
2404 
2405     return start;
2406   }
2407 
2408 
2409   // Perform range checks on the proposed arraycopy.
2410   // Kills the two temps, but nothing else.
2411   // Also, clean the sign bits of src_pos and dst_pos.
2412   void arraycopy_range_checks(Register src,     // source array oop
2413                               Register src_pos, // source position
2414                               Register dst,     // destination array oop
2415                               Register dst_pos, // destination position
2416                               Register length,  // length of copy
2417                               Register temp1, Register temp2,
2418                               Label& L_failed) {
2419     BLOCK_COMMENT("arraycopy_range_checks:");
2420 
2421     const Register array_length = temp1;  // scratch
2422     const Register end_pos      = temp2;  // scratch
2423 
2424     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2425     __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src);
2426     __ add(end_pos, src_pos, length);  // src_pos + length
2427     __ cmpd(CCR0, end_pos, array_length);
2428     __ bgt(CCR0, L_failed);
2429 
2430     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2431     __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst);
2432     __ add(end_pos, dst_pos, length);  // dst_pos + length
2433     __ cmpd(CCR0, end_pos, array_length);
2434     __ bgt(CCR0, L_failed);
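         // Note: as used from generate_generic_copy, the positions and length have
         // already been sign-extended and checked non-negative, and lwa
         // sign-extends the 32-bit array length, so the 64-bit adds above cannot
         // overflow.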
2435 
2436     BLOCK_COMMENT("arraycopy_range_checks done");
2437   }
2438 
2439 
2440   //
2441   //  Generate generic array copy stubs
2442   //
2443   //  Input:
2444   //    R3    -  src oop
2445   //    R4    -  src_pos
2446   //    R5    -  dst oop
2447   //    R6    -  dst_pos
2448   //    R7    -  element count
2449   //
2450   //  Output:
2451   //    R3 ==  0  -  success
2452   //    R3 == -1  -  need to call System.arraycopy
2453   //
2454   address generate_generic_copy(const char *name,
2455                                 address entry_jbyte_arraycopy,
2456                                 address entry_jshort_arraycopy,
2457                                 address entry_jint_arraycopy,
2458                                 address entry_oop_arraycopy,
2459                                 address entry_disjoint_oop_arraycopy,
2460                                 address entry_jlong_arraycopy,
2461                                 address entry_checkcast_arraycopy) {
2462     Label L_failed, L_objArray;
2463 
2464     // Input registers
2465     const Register src       = R3_ARG1;  // source array oop
2466     const Register src_pos   = R4_ARG2;  // source position
2467     const Register dst       = R5_ARG3;  // destination array oop
2468     const Register dst_pos   = R6_ARG4;  // destination position
2469     const Register length    = R7_ARG5;  // elements count
2470 
2471     // registers used as temp
2472     const Register src_klass = R8_ARG6;  // source array klass
2473     const Register dst_klass = R9_ARG7;  // destination array klass
2474     const Register lh        = R10_ARG8; // layout handler
2475     const Register temp      = R2;
2476 
2477     //__ align(CodeEntryAlignment);
2478     StubCodeMark mark(this, "StubRoutines", name);
2479     address start = __ function_entry();
2480 
2481     // Bump this on entry, not on exit:
2482     //inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp);
2483 
2484     // In principle, the int arguments could be dirty.
2485 
2486     //-----------------------------------------------------------------------
2487     // Assembler stubs will be used for this call to arraycopy
2488     // if the following conditions are met:
2489     //
2490     // (1) src and dst must not be null.
2491     // (2) src_pos must not be negative.
2492     // (3) dst_pos must not be negative.
2493     // (4) length  must not be negative.
2494     // (5) src klass and dst klass should be the same and not NULL.
2495     // (6) src and dst should be arrays.
2496     // (7) src_pos + length must not exceed length of src.
2497     // (8) dst_pos + length must not exceed length of dst.
2498     BLOCK_COMMENT("arraycopy initial argument checks");
2499 
2500     __ cmpdi(CCR1, src, 0);      // if (src == NULL) return -1;
2501     __ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1;
2502     __ cmpdi(CCR5, dst, 0);      // if (dst == NULL) return -1;
2503     __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2504     __ extsw_(dst_pos, dst_pos); // if (dst_pos < 0) return -1;
2505     __ cror(CCR5, Assembler::equal, CCR0, Assembler::less);
2506     __ extsw_(length, length);   // if (length < 0) return -1;
2507     __ cror(CCR1, Assembler::equal, CCR5, Assembler::equal);
2508     __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2509     __ beq(CCR1, L_failed);
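         // The checks above are folded with condition-register logic: each extsw_
         // sets CCR0 to the sign of its operand, the two cmpdi results live in
         // CCR1/CCR5, and the cror chain merges "is NULL" and "is negative" into
         // CCR1 so that a single beq rejects any bad argument.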
2510 
2511     BLOCK_COMMENT("arraycopy argument klass checks");
2512     __ load_klass(src_klass, src);
2513     __ load_klass(dst_klass, dst);
2514 
2515     // Load layout helper
2516     //
2517     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2518     // 32        30    24            16              8     2                 0
2519     //
2520     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2521     //
2522 
2523     int lh_offset = in_bytes(Klass::layout_helper_offset());
2524 
2525     // Load the 32-bit signed layout helper value.
2526     __ lwz(lh, lh_offset, src_klass);
2527 
2528     // Handle objArrays completely differently...
2529     jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2530     __ load_const_optimized(temp, objArray_lh, R0);
2531     __ cmpw(CCR0, lh, temp);
2532     __ beq(CCR0, L_objArray);
2533 
2534     __ cmpd(CCR5, src_klass, dst_klass);          // if (src->klass() != dst->klass()) return -1;
2535     __ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1;
2536 
2537     __ crnand(CCR5, Assembler::equal, CCR6, Assembler::less);
2538     __ beq(CCR5, L_failed);
2539 
2540     // At this point, it is known to be a typeArray (array_tag 0x3).
2541 #ifdef ASSERT
2542     { Label L;
2543       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2544       __ load_const_optimized(temp, lh_prim_tag_in_place, R0);
2545       __ cmpw(CCR0, lh, temp);
2546       __ bge(CCR0, L);
2547       __ stop("must be a primitive array");
2548       __ bind(L);
2549     }
2550 #endif
2551 
2552     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2553                            temp, dst_klass, L_failed);
2554 
2555     // TypeArrayKlass
2556     //
2557     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2558     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2559     //
2560 
2561     const Register offset = dst_klass;    // array offset
2562     const Register elsize = src_klass;    // log2 element size
2563 
2564     __ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1));
2565     __ andi(elsize, lh, Klass::_lh_log2_element_size_mask);
2566     __ add(src, offset, src);       // src array offset
2567     __ add(dst, offset, dst);       // dst array offset
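         // The rldicl above isolates the header-size field of the layout helper
         // (rotate the field down to bit 0, then clear everything above its
         // width), and the andi extracts log2(element size), following the
         // Klass::_lh_* encoding.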
2568 
2569     // Next registers should be set before the jump to corresponding stub.
2570     const Register from     = R3_ARG1;  // source array address
2571     const Register to       = R4_ARG2;  // destination array address
2572     const Register count    = R5_ARG3;  // elements count
2573 
2574     // 'from', 'to', 'count' registers should be set in this order
2575     // since they are the same as 'src', 'src_pos', 'dst'.
2576 
2577     BLOCK_COMMENT("scale indexes to element size");
2578     __ sld(src_pos, src_pos, elsize);
2579     __ sld(dst_pos, dst_pos, elsize);
2580     __ add(from, src_pos, src);  // src_addr
2581     __ add(to, dst_pos, dst);    // dst_addr
2582     __ mr(count, length);        // length
2583 
2584     BLOCK_COMMENT("choose copy loop based on element size");
2585     // Using conditional branches with range 32kB.
2586     const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal);
2587     __ cmpwi(CCR0, elsize, 0);
2588     __ bc(bo, bi, entry_jbyte_arraycopy);
2589     __ cmpwi(CCR0, elsize, LogBytesPerShort);
2590     __ bc(bo, bi, entry_jshort_arraycopy);
2591     __ cmpwi(CCR0, elsize, LogBytesPerInt);
2592     __ bc(bo, bi, entry_jint_arraycopy);
2593 #ifdef ASSERT
2594     { Label L;
2595       __ cmpwi(CCR0, elsize, LogBytesPerLong);
2596       __ beq(CCR0, L);
2597       __ stop("must be long copy, but elsize is wrong");
2598       __ bind(L);
2599     }
2600 #endif
2601     __ b(entry_jlong_arraycopy);
2602 
2603     // ObjArrayKlass
2604   __ bind(L_objArray);
2605     // live at this point:  src_klass, dst_klass, src[_pos], dst[_pos], length
2606 
2607     Label L_disjoint_plain_copy, L_checkcast_copy;
2608     //  test array classes for subtyping
2609     __ cmpd(CCR0, src_klass, dst_klass);         // usual case is exact equality
2610     __ bne(CCR0, L_checkcast_copy);
2611 
2612     // Identically typed arrays can be copied without element-wise checks.
2613     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2614                            temp, lh, L_failed);
2615 
2616     __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2617     __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2618     __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2619     __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2620     __ add(from, src_pos, src);  // src_addr
2621     __ add(to, dst_pos, dst);    // dst_addr
2622     __ mr(count, length);        // length
2623     __ b(entry_oop_arraycopy);
2624 
2625   __ bind(L_checkcast_copy);
2626     // live at this point:  src_klass, dst_klass
2627     {
2628       // Before looking at dst.length, make sure dst is also an objArray.
2629       __ lwz(temp, lh_offset, dst_klass);
2630       __ cmpw(CCR0, lh, temp);
2631       __ bne(CCR0, L_failed);
2632 
2633       // It is safe to examine both src.length and dst.length.
2634       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2635                              temp, lh, L_failed);
2636 
2637       // Marshal the base address arguments now, freeing registers.
2638       __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2639       __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2640       __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2641       __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2642       __ add(from, src_pos, src);  // src_addr
2643       __ add(to, dst_pos, dst);    // dst_addr
2644       __ mr(count, length);        // length
2645 
2646       Register sco_temp = R6_ARG4;             // This register is free now.
2647       assert_different_registers(from, to, count, sco_temp,
2648                                  dst_klass, src_klass);
2649 
2650       // Generate the type check.
2651       int sco_offset = in_bytes(Klass::super_check_offset_offset());
2652       __ lwz(sco_temp, sco_offset, dst_klass);
2653       generate_type_check(src_klass, sco_temp, dst_klass,
2654                           temp, L_disjoint_plain_copy);
2655 
2656       // Fetch destination element klass from the ObjArrayKlass header.
2657       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2658 
2659       // The checkcast_copy loop needs two extra arguments:
2660       __ ld(R7_ARG5, ek_offset, dst_klass);   // dest elem klass
2661       __ lwz(R6_ARG4, sco_offset, R7_ARG5);   // sco of elem klass
2662       __ b(entry_checkcast_arraycopy);
2663     }
2664 
2665     __ bind(L_disjoint_plain_copy);
2666     __ b(entry_disjoint_oop_arraycopy);
2667 
2668   __ bind(L_failed);
2669     __ li(R3_RET, -1); // return -1
2670     __ blr();
2671     return start;
2672   }
2673 
2674   // Arguments for generated stub:
2675   //   R3_ARG1   - source byte array address
2676   //   R4_ARG2   - destination byte array address
2677   //   R5_ARG3   - round key array
2678   address generate_aescrypt_encryptBlock() {
2679     assert(UseAES, "need AES instructions support");
2680     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
2681 
2682     address start = __ function_entry();
2683 
2684     Label L_doLast;
2685 
2686     Register from           = R3_ARG1;  // source array address
2687     Register to             = R4_ARG2;  // destination array address
2688     Register key            = R5_ARG3;  // round key array
2689 
2690     Register keylen         = R8;
2691     Register temp           = R9;
2692     Register keypos         = R10;
2693     Register fifteen        = R12;
2694 
2695     VectorRegister vRet     = VR0;
2696 
2697     VectorRegister vKey1    = VR1;
2698     VectorRegister vKey2    = VR2;
2699     VectorRegister vKey3    = VR3;
2700     VectorRegister vKey4    = VR4;
2701 
2702     VectorRegister fromPerm = VR5;
2703     VectorRegister keyPerm  = VR6;
2704     VectorRegister toPerm   = VR7;
2705     VectorRegister fSplt    = VR8;
2706 
2707     VectorRegister vTmp1    = VR9;
2708     VectorRegister vTmp2    = VR10;
2709     VectorRegister vTmp3    = VR11;
2710     VectorRegister vTmp4    = VR12;
2711 
2712     __ li              (fifteen, 15);
2713 
2714     // load unaligned from[0-15] to vRet
2715     __ lvx             (vRet, from);
2716     __ lvx             (vTmp1, fifteen, from);
2717     __ lvsl            (fromPerm, from);
2718 #ifdef VM_LITTLE_ENDIAN
2719     __ vspltisb        (fSplt, 0x0f);
2720     __ vxor            (fromPerm, fromPerm, fSplt);
2721 #endif
2722     __ vperm           (vRet, vRet, vTmp1, fromPerm);
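         // The sequence above is the usual AltiVec unaligned-load idiom: the two
         // lvx fetch the aligned quadwords covering from[0..15], lvsl derives a
         // permute control from the low address bits, and vperm assembles the 16
         // requested bytes (the vxor with 0x0f on little endian compensates for
         // the reversed byte numbering).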
2723 
2724     // load keylen (44 or 52 or 60)
2725     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2726 
2727     // to load keys
2728     __ load_perm       (keyPerm, key);
2729 #ifdef VM_LITTLE_ENDIAN
2730     __ vspltisb        (vTmp2, -16);
2731     __ vrld            (keyPerm, keyPerm, vTmp2);
2732     __ vrld            (keyPerm, keyPerm, vTmp2);
2733     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
2734 #endif
2735 
2736     // load the 1st round key to vTmp1
2737     __ lvx             (vTmp1, key);
2738     __ li              (keypos, 16);
2739     __ lvx             (vKey1, keypos, key);
2740     __ vec_perm        (vTmp1, vKey1, keyPerm);
2741 
2742     // 1st round
2743     __ vxor            (vRet, vRet, vTmp1);
2744 
2745     // load the 2nd round key to vKey1
2746     __ li              (keypos, 32);
2747     __ lvx             (vKey2, keypos, key);
2748     __ vec_perm        (vKey1, vKey2, keyPerm);
2749 
2750     // load the 3rd round key to vKey2
2751     __ li              (keypos, 48);
2752     __ lvx             (vKey3, keypos, key);
2753     __ vec_perm        (vKey2, vKey3, keyPerm);
2754 
2755     // load the 4th round key to vKey3
2756     __ li              (keypos, 64);
2757     __ lvx             (vKey4, keypos, key);
2758     __ vec_perm        (vKey3, vKey4, keyPerm);
2759 
2760     // load the 5th round key to vKey4
2761     __ li              (keypos, 80);
2762     __ lvx             (vTmp1, keypos, key);
2763     __ vec_perm        (vKey4, vTmp1, keyPerm);
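         // The expanded key need not be 16-byte aligned, so each round key is
         // formed by combining two adjacent quadword loads with vec_perm; the
         // second quadword of one step is reused as the first quadword of the
         // next.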
2764 
2765     // 2nd - 5th rounds
2766     __ vcipher         (vRet, vRet, vKey1);
2767     __ vcipher         (vRet, vRet, vKey2);
2768     __ vcipher         (vRet, vRet, vKey3);
2769     __ vcipher         (vRet, vRet, vKey4);
2770 
2771     // load the 6th round key to vKey1
2772     __ li              (keypos, 96);
2773     __ lvx             (vKey2, keypos, key);
2774     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2775 
2776     // load the 7th round key to vKey2
2777     __ li              (keypos, 112);
2778     __ lvx             (vKey3, keypos, key);
2779     __ vec_perm        (vKey2, vKey3, keyPerm);
2780 
2781     // load the 8th round key to vKey3
2782     __ li              (keypos, 128);
2783     __ lvx             (vKey4, keypos, key);
2784     __ vec_perm        (vKey3, vKey4, keyPerm);
2785 
2786     // load the 9th round key to vKey4
2787     __ li              (keypos, 144);
2788     __ lvx             (vTmp1, keypos, key);
2789     __ vec_perm        (vKey4, vTmp1, keyPerm);
2790 
2791     // 6th - 9th rounds
2792     __ vcipher         (vRet, vRet, vKey1);
2793     __ vcipher         (vRet, vRet, vKey2);
2794     __ vcipher         (vRet, vRet, vKey3);
2795     __ vcipher         (vRet, vRet, vKey4);
2796 
2797     // load the 10th round key to vKey1
2798     __ li              (keypos, 160);
2799     __ lvx             (vKey2, keypos, key);
2800     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2801 
2802     // load the 11th round key to vKey2
2803     __ li              (keypos, 176);
2804     __ lvx             (vTmp1, keypos, key);
2805     __ vec_perm        (vKey2, vTmp1, keyPerm);
2806 
2807     // if all round keys are loaded, skip next 4 rounds
2808     __ cmpwi           (CCR0, keylen, 44);
2809     __ beq             (CCR0, L_doLast);
2810 
2811     // 10th - 11th rounds
2812     __ vcipher         (vRet, vRet, vKey1);
2813     __ vcipher         (vRet, vRet, vKey2);
2814 
2815     // load the 12th round key to vKey1
2816     __ li              (keypos, 192);
2817     __ lvx             (vKey2, keypos, key);
2818     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2819 
2820     // load the 13th round key to vKey2
2821     __ li              (keypos, 208);
2822     __ lvx             (vTmp1, keypos, key);
2823     __ vec_perm        (vKey2, vTmp1, keyPerm);
2824 
2825     // if all round keys are loaded, skip next 2 rounds
2826     __ cmpwi           (CCR0, keylen, 52);
2827     __ beq             (CCR0, L_doLast);
2828 
2829     // 12th - 13th rounds
2830     __ vcipher         (vRet, vRet, vKey1);
2831     __ vcipher         (vRet, vRet, vKey2);
2832 
2833     // load the 14th round key to vKey1
2834     __ li              (keypos, 224);
2835     __ lvx             (vKey2, keypos, key);
2836     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2837 
2838     // load the 15th round key to vKey2
2839     __ li              (keypos, 240);
2840     __ lvx             (vTmp1, keypos, key);
2841     __ vec_perm        (vKey2, vTmp1, keyPerm);
2842 
2843     __ bind(L_doLast);
2844 
2845     // last two rounds
2846     __ vcipher         (vRet, vRet, vKey1);
2847     __ vcipherlast     (vRet, vRet, vKey2);
2848 
2849     // store result (unaligned)
2850 #ifdef VM_LITTLE_ENDIAN
2851     __ lvsl            (toPerm, to);
2852 #else
2853     __ lvsr            (toPerm, to);
2854 #endif
2855     __ vspltisb        (vTmp3, -1);
2856     __ vspltisb        (vTmp4, 0);
2857     __ lvx             (vTmp1, to);
2858     __ lvx             (vTmp2, fifteen, to);
2859 #ifdef VM_LITTLE_ENDIAN
2860     __ vperm           (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
2861     __ vxor            (toPerm, toPerm, fSplt);       // swap bytes
2862 #else
2863     __ vperm           (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
2864 #endif
2865     __ vperm           (vTmp4, vRet, vRet, toPerm);   // rotate data
2866     __ vsel            (vTmp2, vTmp4, vTmp2, vTmp3);
2867     __ vsel            (vTmp1, vTmp1, vTmp4, vTmp3);
2868     __ stvx            (vTmp2, fifteen, to);          // store this one first (may alias)
2869     __ stvx            (vTmp1, to);
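         // The store sequence above is the read-modify-write idiom for unaligned
         // destinations: the two quadwords covering to[0..15] are loaded, the
         // result is rotated into place with vperm, and vsel with the generated
         // mask replaces only the 16 destination bytes before both quadwords are
         // written back.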
2870 
2871     __ blr();
2872     return start;
2873   }
2874 
2875   // Arguments for generated stub:
2876   //   R3_ARG1   - source byte array address
2877   //   R4_ARG2   - destination byte array address
2878   //   R5_ARG3   - K (key) in little endian int array
2879   address generate_aescrypt_decryptBlock() {
2880     assert(UseAES, "need AES instructions support");
2881     StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
2882 
2883     address start = __ function_entry();
2884 
2885     Label L_doLast;
2886     Label L_do44;
2887     Label L_do52;
2888     Label L_do60;
2889 
2890     Register from           = R3_ARG1;  // source array address
2891     Register to             = R4_ARG2;  // destination array address
2892     Register key            = R5_ARG3;  // round key array
2893 
2894     Register keylen         = R8;
2895     Register temp           = R9;
2896     Register keypos         = R10;
2897     Register fifteen        = R12;
2898 
2899     VectorRegister vRet     = VR0;
2900 
2901     VectorRegister vKey1    = VR1;
2902     VectorRegister vKey2    = VR2;
2903     VectorRegister vKey3    = VR3;
2904     VectorRegister vKey4    = VR4;
2905     VectorRegister vKey5    = VR5;
2906 
2907     VectorRegister fromPerm = VR6;
2908     VectorRegister keyPerm  = VR7;
2909     VectorRegister toPerm   = VR8;
2910     VectorRegister fSplt    = VR9;
2911 
2912     VectorRegister vTmp1    = VR10;
2913     VectorRegister vTmp2    = VR11;
2914     VectorRegister vTmp3    = VR12;
2915     VectorRegister vTmp4    = VR13;
2916 
2917     __ li              (fifteen, 15);
2918 
2919     // load unaligned from[0-15] to vRet
2920     __ lvx             (vRet, from);
2921     __ lvx             (vTmp1, fifteen, from);
2922     __ lvsl            (fromPerm, from);
2923 #ifdef VM_LITTLE_ENDIAN
2924     __ vspltisb        (fSplt, 0x0f);
2925     __ vxor            (fromPerm, fromPerm, fSplt);
2926 #endif
2927     __ vperm           (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]
2928 
2929     // load keylen (44 or 52 or 60)
2930     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2931 
2932     // Set up the permute vector used to align the round keys when loading them.
2933     __ load_perm       (keyPerm, key);
2934 #ifdef VM_LITTLE_ENDIAN
2935     __ vxor            (vTmp2, vTmp2, vTmp2);
2936     __ vspltisb        (vTmp2, -16);
2937     __ vrld            (keyPerm, keyPerm, vTmp2);
2938     __ vrld            (keyPerm, keyPerm, vTmp2);
2939     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
2940 #endif
2941 
2942     __ cmpwi           (CCR0, keylen, 44);
2943     __ beq             (CCR0, L_do44);
2944 
2945     __ cmpwi           (CCR0, keylen, 52);
2946     __ beq             (CCR0, L_do52);
2947 
2948     // load the 15th round key to vKey1
2949     __ li              (keypos, 240);
2950     __ lvx             (vKey1, keypos, key);
2951     __ li              (keypos, 224);
2952     __ lvx             (vKey2, keypos, key);
2953     __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
2954 
2955     // load the 14th round key to vKey2
2956     __ li              (keypos, 208);
2957     __ lvx             (vKey3, keypos, key);
2958     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2959 
2960     // load the 13th round key to vKey3
2961     __ li              (keypos, 192);
2962     __ lvx             (vKey4, keypos, key);
2963     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
2964 
2965     // load the 12th round key to vKey4
2966     __ li              (keypos, 176);
2967     __ lvx             (vKey5, keypos, key);
2968     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
2969 
2970     // load the 11th round key to vKey5
2971     __ li              (keypos, 160);
2972     __ lvx             (vTmp1, keypos, key);
2973     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
2974 
2975     // 1st - 5th rounds
2976     __ vxor            (vRet, vRet, vKey1);
2977     __ vncipher        (vRet, vRet, vKey2);
2978     __ vncipher        (vRet, vRet, vKey3);
2979     __ vncipher        (vRet, vRet, vKey4);
2980     __ vncipher        (vRet, vRet, vKey5);
2981 
2982     __ b               (L_doLast);
2983 
2984     __ bind            (L_do52);
2985 
2986     // load the 13th round key to vKey1
2987     __ li              (keypos, 208);
2988     __ lvx             (vKey1, keypos, key);
2989     __ li              (keypos, 192);
2990     __ lvx             (vKey2, keypos, key);
2991     __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
2992 
2993     // load the 12th round key to vKey2
2994     __ li              (keypos, 176);
2995     __ lvx             (vKey3, keypos, key);
2996     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2997 
2998     // load the 11th round key to vKey3
2999     __ li              (keypos, 160);
3000     __ lvx             (vTmp1, keypos, key);
3001     __ vec_perm        (vKey3, vTmp1, vKey3, keyPerm);
3002 
3003     // 1st - 3rd rounds
3004     __ vxor            (vRet, vRet, vKey1);
3005     __ vncipher        (vRet, vRet, vKey2);
3006     __ vncipher        (vRet, vRet, vKey3);
3007 
3008     __ b               (L_doLast);
3009 
3010     __ bind            (L_do44);
3011 
3012     // load the 11th round key to vKey1
3013     __ li              (keypos, 176);
3014     __ lvx             (vKey1, keypos, key);
3015     __ li              (keypos, 160);
3016     __ lvx             (vTmp1, keypos, key);
3017     __ vec_perm        (vKey1, vTmp1, vKey1, keyPerm);
3018 
3019     // 1st round
3020     __ vxor            (vRet, vRet, vKey1);
3021 
3022     __ bind            (L_doLast);
3023 
3024     // load the 10th round key to vKey1
3025     __ li              (keypos, 144);
3026     __ lvx             (vKey2, keypos, key);
3027     __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
3028 
3029     // load the 9th round key to vKey2
3030     __ li              (keypos, 128);
3031     __ lvx             (vKey3, keypos, key);
3032     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
3033 
3034     // load the 8th round key to vKey3
3035     __ li              (keypos, 112);
3036     __ lvx             (vKey4, keypos, key);
3037     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
3038 
3039     // load the 7th round key to vKey4
3040     __ li              (keypos, 96);
3041     __ lvx             (vKey5, keypos, key);
3042     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
3043 
3044     // load the 6th round key to vKey5
3045     __ li              (keypos, 80);
3046     __ lvx             (vTmp1, keypos, key);
3047     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
3048 
3049     // last 10th - 6th rounds
3050     __ vncipher        (vRet, vRet, vKey1);
3051     __ vncipher        (vRet, vRet, vKey2);
3052     __ vncipher        (vRet, vRet, vKey3);
3053     __ vncipher        (vRet, vRet, vKey4);
3054     __ vncipher        (vRet, vRet, vKey5);
3055 
3056     // load the 5th round key to vKey1
3057     __ li              (keypos, 64);
3058     __ lvx             (vKey2, keypos, key);
3059     __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
3060 
3061     // load the 4th round key to vKey2
3062     __ li              (keypos, 48);
3063     __ lvx             (vKey3, keypos, key);
3064     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
3065 
3066     // load the 3rd round key to vKey3
3067     __ li              (keypos, 32);
3068     __ lvx             (vKey4, keypos, key);
3069     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
3070 
3071     // load the 2nd round key to vKey4
3072     __ li              (keypos, 16);
3073     __ lvx             (vKey5, keypos, key);
3074     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
3075 
3076     // load the 1st round key to vKey5
3077     __ lvx             (vTmp1, key);
3078     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
3079 
3080     // last 5th - 1st rounds
3081     __ vncipher        (vRet, vRet, vKey1);
3082     __ vncipher        (vRet, vRet, vKey2);
3083     __ vncipher        (vRet, vRet, vKey3);
3084     __ vncipher        (vRet, vRet, vKey4);
3085     __ vncipherlast    (vRet, vRet, vKey5);
3086 
3087     // store result (unaligned)
3088 #ifdef VM_LITTLE_ENDIAN
3089     __ lvsl            (toPerm, to);
3090 #else
3091     __ lvsr            (toPerm, to);
3092 #endif
3093     __ vspltisb        (vTmp3, -1);
3094     __ vspltisb        (vTmp4, 0);
3095     __ lvx             (vTmp1, to);
3096     __ lvx             (vTmp2, fifteen, to);
3097 #ifdef VM_LITTLE_ENDIAN
3098     __ vperm           (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
3099     __ vxor            (toPerm, toPerm, fSplt);       // swap bytes
3100 #else
3101     __ vperm           (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
3102 #endif
3103     __ vperm           (vTmp4, vRet, vRet, toPerm);   // rotate data
3104     __ vsel            (vTmp2, vTmp4, vTmp2, vTmp3);
3105     __ vsel            (vTmp1, vTmp1, vTmp4, vTmp3);
3106     __ stvx            (vTmp2, fifteen, to);          // store this one first (may alias)
3107     __ stvx            (vTmp1, to);
3108 
3109     __ blr();
3110     return start;
3111   }
3112 
3113   address generate_sha256_implCompress(bool multi_block, const char *name) {
3114     assert(UseSHA, "need SHA instructions");
3115     StubCodeMark mark(this, "StubRoutines", name);
3116     address start = __ function_entry();
3117 
3118     __ sha256 (multi_block);
3119 
3120     __ blr();
3121     return start;
3122   }
3123 
3124   address generate_sha512_implCompress(bool multi_block, const char *name) {
3125     assert(UseSHA, "need SHA instructions");
3126     StubCodeMark mark(this, "StubRoutines", name);
3127     address start = __ function_entry();
3128 
3129     __ sha512 (multi_block);
3130 
3131     __ blr();
3132     return start;
3133   }
3134 
3135   void generate_arraycopy_stubs() {
3136     // Note: the disjoint stubs must be generated first because some of
3137     // the conjoint stubs use them.
3138 
3139     // non-aligned disjoint versions
3140     StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
3141     StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
3142     StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
3143     StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
3144     StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
3145     StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
3146 
3147     // aligned disjoint versions
3148     StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
3149     StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
3150     StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
3151     StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
3152     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
3153     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
3154 
3155     // non-aligned conjoint versions
3156     StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
3157     StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
3158     StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
3159     StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
3160     StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
3161     StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
3162 
3163     // aligned conjoint versions
3164     StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
3165     StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
3166     StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
3167     StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
3168     StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
3169     StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
3170 
3171     // special/generic versions
3172     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", false);
3173     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true);
3174 
3175     StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
3176                                                             STUB_ENTRY(jbyte_arraycopy),
3177                                                             STUB_ENTRY(jshort_arraycopy),
3178                                                             STUB_ENTRY(jint_arraycopy),
3179                                                             STUB_ENTRY(jlong_arraycopy));
3180     StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
3181                                                              STUB_ENTRY(jbyte_arraycopy),
3182                                                              STUB_ENTRY(jshort_arraycopy),
3183                                                              STUB_ENTRY(jint_arraycopy),
3184                                                              STUB_ENTRY(oop_arraycopy),
3185                                                              STUB_ENTRY(oop_disjoint_arraycopy),
3186                                                              STUB_ENTRY(jlong_arraycopy),
3187                                                              STUB_ENTRY(checkcast_arraycopy));
3188 
3189     // fill routines
3190     if (OptimizeFill) {
3191       StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
3192       StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
3193       StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
3194       StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
3195       StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3196       StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
3197     }
3198   }
3199 
3200   // Safefetch stubs.
3201   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
3202     // safefetch signatures:
3203     //   int      SafeFetch32(int*      adr, int      errValue);
3204     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3205     //
3206     // arguments:
3207     //   R3_ARG1 = adr
3208     //   R4_ARG2 = errValue
3209     //
3210     // result:
3211     //   R3_RET  = *adr or errValue
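         //
         // Illustrative use from VM code (a sketch, not part of the stub itself):
         // probe memory that may be unmapped and fall back to the error value if
         // the load faults, e.g.
         //   int v = SafeFetch32((int*) addr, -1);
         //   if (v == -1) { /* addr was unreadable (or really contained -1) */ }
         // On a fault at *fault_pc execution resumes at *continuation_pc, which
         // returns errValue.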
3212 
3213     StubCodeMark mark(this, "StubRoutines", name);
3214 
3215     // Entry point, pc or function descriptor.
3216     *entry = __ function_entry();
3217 
3218     // Load *adr into R4_ARG2, may fault.
3219     *fault_pc = __ pc();
3220     switch (size) {
3221       case 4:
3222         // int32_t, sign-extended
3223         __ lwa(R4_ARG2, 0, R3_ARG1);
3224         break;
3225       case 8:
3226         // int64_t
3227         __ ld(R4_ARG2, 0, R3_ARG1);
3228         break;
3229       default:
3230         ShouldNotReachHere();
3231     }
3232 
3233     // return errValue or *adr
3234     *continuation_pc = __ pc();
3235     __ mr(R3_RET, R4_ARG2);
3236     __ blr();
3237   }
3238 
3239   // Stub for BigInteger::multiplyToLen()
3240   //
3241   //  Arguments:
3242   //
3243   //  Input:
3244   //    R3 - x address
3245   //    R4 - x length
3246   //    R5 - y address
3247   //    R6 - y length
3248   //    R7 - z address
3249   //    R8 - z length
3250   //
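       // The stub computes the same product as the Java fallback of
       // BigInteger.multiplyToLen: z receives x * y, where x and y are int[]
       // magnitudes and z provides room for xlen + ylen ints. A sketch of the
       // per-word step of the accumulation passes (C-style, for orientation only;
       // the words are treated as unsigned 32-bit values):
       //   uint64_t p = (uint64_t)x[i] * y[j] + z[k] + carry;
       //   z[k] = (uint32_t)p;  carry = p >> 32;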
3251   address generate_multiplyToLen() {
3252 
3253     StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
3254 
3255     address start = __ function_entry();
3256 
3257     const Register x     = R3;
3258     const Register xlen  = R4;
3259     const Register y     = R5;
3260     const Register ylen  = R6;
3261     const Register z     = R7;
3262     const Register zlen  = R8;
3263 
3264     const Register tmp1  = R2; // TOC not used.
3265     const Register tmp2  = R9;
3266     const Register tmp3  = R10;
3267     const Register tmp4  = R11;
3268     const Register tmp5  = R12;
3269 
3270     // non-volatile regs
3271     const Register tmp6  = R31;
3272     const Register tmp7  = R30;
3273     const Register tmp8  = R29;
3274     const Register tmp9  = R28;
3275     const Register tmp10 = R27;
3276     const Register tmp11 = R26;
3277     const Register tmp12 = R25;
3278     const Register tmp13 = R24;
3279 
3280     BLOCK_COMMENT("Entry:");
3281 
3282     // C2 does not respect int-to-long conversion for stub calls, so clear the upper 32 bits explicitly.
3283     __ clrldi(xlen, xlen, 32);
3284     __ clrldi(ylen, ylen, 32);
3285     __ clrldi(zlen, zlen, 32);
3286 
3287     // Save non-volatile regs (frameless).
3288     int current_offs = 8;
3289     __ std(R24, -current_offs, R1_SP); current_offs += 8;
3290     __ std(R25, -current_offs, R1_SP); current_offs += 8;
3291     __ std(R26, -current_offs, R1_SP); current_offs += 8;
3292     __ std(R27, -current_offs, R1_SP); current_offs += 8;
3293     __ std(R28, -current_offs, R1_SP); current_offs += 8;
3294     __ std(R29, -current_offs, R1_SP); current_offs += 8;
3295     __ std(R30, -current_offs, R1_SP); current_offs += 8;
3296     __ std(R31, -current_offs, R1_SP);
3297 
3298     __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5,
3299                        tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13);
3300 
3301     // Restore non-volatile regs.
3302     current_offs = 8;
3303     __ ld(R24, -current_offs, R1_SP); current_offs += 8;
3304     __ ld(R25, -current_offs, R1_SP); current_offs += 8;
3305     __ ld(R26, -current_offs, R1_SP); current_offs += 8;
3306     __ ld(R27, -current_offs, R1_SP); current_offs += 8;
3307     __ ld(R28, -current_offs, R1_SP); current_offs += 8;
3308     __ ld(R29, -current_offs, R1_SP); current_offs += 8;
3309     __ ld(R30, -current_offs, R1_SP); current_offs += 8;
3310     __ ld(R31, -current_offs, R1_SP);
3311 
3312     __ blr();  // Return to caller.
3313 
3314     return start;
3315   }
3316 
3317 
3318   // Compute CRC32/CRC32C function.
3319   void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
3320 
3321       // arguments to kernel_crc32:
3322       const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3323       const Register data    = R4_ARG2;  // source byte array
3324       const Register dataLen = R5_ARG3;  // #bytes to process
3325 
3326       const Register t0      = R2;
3327       const Register t1      = R7;
3328       const Register t2      = R8;
3329       const Register t3      = R9;
3330       const Register tc0     = R10;
3331       const Register tc1     = R11;
3332       const Register tc2     = R12;
3333 
3334       BLOCK_COMMENT("Stub body {");
3335       assert_different_registers(crc, data, dataLen, table);
3336 
3337       __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
3338 
3339       BLOCK_COMMENT("return");
3340       __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3341       __ blr();
3342 
3343       BLOCK_COMMENT("} Stub body");
3344   }
3345 
3346   /**
3347   *  Arguments:
3348   *
3349   *  Input:
3350   *   R3_ARG1    - out address
3351   *   R4_ARG2    - in address
3352   *   R5_ARG3    - offset
3353   *   R6_ARG4    - len
3354   *   R7_ARG5    - k
3355   *  Output:
3356   *   R3_RET     - carry
3357   */
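       // The stub mirrors BigInteger.mulAdd: it multiplies the int[] 'in' by the
       // 32-bit value k, adds the product into 'out' at the given offset and
       // returns the final carry. A sketch of one step (C-style, for orientation
       // only; words are treated as unsigned 32-bit values):
       //   uint64_t p = (uint64_t)in[j] * k + out[off] + carry;
       //   out[off--] = (uint32_t)p;  carry = p >> 32;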
3358   address generate_mulAdd() {
3359     __ align(CodeEntryAlignment);
3360     StubCodeMark mark(this, "StubRoutines", "mulAdd");
3361 
3362     address start = __ function_entry();
3363 
3364     // C2 does not sign-extend signed int parameters to full 64-bit registers:
3365     __ rldic (R5_ARG3, R5_ARG3, 2, 32);  // convert int offset to byte offset (value is non-negative)
3366     __ clrldi(R6_ARG4, R6_ARG4, 32);     // zero-extend len
3367     __ clrldi(R7_ARG5, R7_ARG5, 32);     // zero-extend k
3368 
3369     __ muladd(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4, R7_ARG5, R8, R9, R10);
3370 
3371     // Moves output carry to return register
3372     __ mr    (R3_RET,  R10);
3373 
3374     __ blr();
3375 
3376     return start;
3377   }
3378 
3379   /**
3380   *  Arguments:
3381   *
3382   *  Input:
3383   *   R3_ARG1    - in address
3384   *   R4_ARG2    - in length
3385   *   R5_ARG3    - out address
3386   *   R6_ARG4    - out length
3387   */
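       // The stub follows the same scheme as BigInteger.squareToLen: the squares
       // of the individual input words are stored first (shifted right by one bit),
       // the off-diagonal cross products are then added in via the mulAdd kernel,
       // and finally the whole result is shifted left by one bit and the low bit is
       // restored. This exploits the identity
       //   (sum a_i*B^i)^2 = sum a_i^2*B^(2i) + 2 * sum_{i<j} a_i*a_j*B^(i+j),
       // with the factor of two realized by the final left shift.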
3388   address generate_squareToLen() {
3389     __ align(CodeEntryAlignment);
3390     StubCodeMark mark(this, "StubRoutines", "squareToLen");
3391 
3392     address start = __ function_entry();
3393 
3394     // int args: clear the upper 32 bits (zero-extend) because of the int-to-long conversion
3395     const Register in        = R3_ARG1;
3396     const Register in_len    = R4_ARG2;
3397     __ clrldi(in_len, in_len, 32);
3398     const Register out       = R5_ARG3;
3399     const Register out_len   = R6_ARG4;
3400     __ clrldi(out_len, out_len, 32);
3401 
3402     // output
3403     const Register ret       = R3_RET;
3404 
3405     // temporaries
3406     const Register lplw_s    = R7;
3407     const Register in_aux    = R8;
3408     const Register out_aux   = R9;
3409     const Register piece     = R10;
3410     const Register product   = R14;
3411     const Register lplw      = R15;
3412     const Register i_minus1  = R16;
3413     const Register carry     = R17;
3414     const Register offset    = R18;
3415     const Register off_aux   = R19;
3416     const Register t         = R20;
3417     const Register mlen      = R21;
3418     const Register len       = R22;
3419     const Register a         = R23;
3420     const Register b         = R24;
3421     const Register i         = R25;
3422     const Register c         = R26;
3423     const Register cs        = R27;
3424 
3425     // Labels
3426     Label SKIP_LSHIFT, SKIP_DIAGONAL_SUM, SKIP_ADDONE, SKIP_MULADD, SKIP_LOOP_SQUARE;
3427     Label LOOP_LSHIFT, LOOP_DIAGONAL_SUM, LOOP_ADDONE, LOOP_MULADD, LOOP_SQUARE;
3428 
3429     // Save non-volatile regs (frameless).
3430     int current_offs = -8;
3431     __ std(R28, current_offs, R1_SP); current_offs -= 8;
3432     __ std(R27, current_offs, R1_SP); current_offs -= 8;
3433     __ std(R26, current_offs, R1_SP); current_offs -= 8;
3434     __ std(R25, current_offs, R1_SP); current_offs -= 8;
3435     __ std(R24, current_offs, R1_SP); current_offs -= 8;
3436     __ std(R23, current_offs, R1_SP); current_offs -= 8;
3437     __ std(R22, current_offs, R1_SP); current_offs -= 8;
3438     __ std(R21, current_offs, R1_SP); current_offs -= 8;
3439     __ std(R20, current_offs, R1_SP); current_offs -= 8;
3440     __ std(R19, current_offs, R1_SP); current_offs -= 8;
3441     __ std(R18, current_offs, R1_SP); current_offs -= 8;
3442     __ std(R17, current_offs, R1_SP); current_offs -= 8;
3443     __ std(R16, current_offs, R1_SP); current_offs -= 8;
3444     __ std(R15, current_offs, R1_SP); current_offs -= 8;
3445     __ std(R14, current_offs, R1_SP);
3446 
3447     // Store the squares, right shifted one bit (i.e., divided by 2)
3448     __ subi   (out_aux,   out,       8);
3449     __ subi   (in_aux,    in,        4);
3450     __ cmpwi  (CCR0,      in_len,    0);
3451     // Initialize lplw outside of the loop
3452     __ xorr   (lplw,      lplw,      lplw);
3453     __ ble    (CCR0,      SKIP_LOOP_SQUARE);    // in_len <= 0
3454     __ mtctr  (in_len);
3455 
3456     __ bind(LOOP_SQUARE);
3457     __ lwzu   (piece,     4,         in_aux);
3458     __ mulld  (product,   piece,     piece);
3459     // shift left 63 bits and only keep the MSB
3460     __ rldic  (lplw_s,    lplw,      63, 0);
3461     __ mr     (lplw,      product);
3462     // shift right 1 bit without sign extension
3463     __ srdi   (product,   product,   1);
3464     // join them to the same register and store it
3465     __ orr    (product,   lplw_s,    product);
3466 #ifdef VM_LITTLE_ENDIAN
3467     // Swap low and high words for little endian
3468     __ rldicl (product,   product,   32, 0);
3469 #endif
3470     __ stdu   (product,   8,         out_aux);
3471     __ bdnz   (LOOP_SQUARE);
3472 
3473     __ bind(SKIP_LOOP_SQUARE);
3474 
3475     // Add in off-diagonal sums
3476     __ cmpwi  (CCR0,      in_len,    0);
3477     __ ble    (CCR0,      SKIP_DIAGONAL_SUM);
3478     // Avoid CTR usage here in order to use it at mulAdd
3479     __ subi   (i_minus1,  in_len,    1);
3480     __ li     (offset,    4);
3481 
3482     __ bind(LOOP_DIAGONAL_SUM);
3483 
3484     __ sldi   (off_aux,   out_len,   2);
3485     __ sub    (off_aux,   off_aux,   offset);
3486 
3487     __ mr     (len,       i_minus1);
3488     __ sldi   (mlen,      i_minus1,  2);
3489     __ lwzx   (t,         in,        mlen);
3490 
3491     __ muladd (out, in, off_aux, len, t, a, b, carry);
3492 
3493     // begin<addOne>
3494     // off_aux = out_len*4 - 4 - mlen - offset*4 - 4;
3495     __ addi   (mlen,      mlen,      4);
3496     __ sldi   (a,         out_len,   2);
3497     __ subi   (a,         a,         4);
3498     __ sub    (a,         a,         mlen);
3499     __ subi   (off_aux,   offset,    4);
3500     __ sub    (off_aux,   a,         off_aux);
3501 
3502     __ lwzx   (b,         off_aux,   out);
3503     __ add    (b,         b,         carry);
3504     __ stwx   (b,         off_aux,   out);
3505 
3506     // if (((uint64_t)s >> 32) != 0) {
3507     __ srdi_  (a,         b,         32);
3508     __ beq    (CCR0,      SKIP_ADDONE);
3509 
3510     // while (--mlen >= 0) {
3511     __ bind(LOOP_ADDONE);
3512     __ subi   (mlen,      mlen,      4);
3513     __ cmpwi  (CCR0,      mlen,      0);
3514     __ beq    (CCR0,      SKIP_ADDONE);
3515 
3516     // if (--offset_aux < 0) { // Carry out of number
3517     __ subi   (off_aux,   off_aux,   4);
3518     __ cmpwi  (CCR0,      off_aux,   0);
3519     __ blt    (CCR0,      SKIP_ADDONE);
3520 
3521     // } else {
3522     __ lwzx   (b,         off_aux,   out);
3523     __ addi   (b,         b,         1);
3524     __ stwx   (b,         off_aux,   out);
3525     __ cmpwi  (CCR0,      b,         0);
3526     __ bne    (CCR0,      SKIP_ADDONE);
3527     __ b      (LOOP_ADDONE);
3528 
3529     __ bind(SKIP_ADDONE);
3530     // } } } end<addOne>
3531 
3532     __ addi   (offset,    offset,    8);
3533     __ subi   (i_minus1,  i_minus1,  1);
3534     __ cmpwi  (CCR0,      i_minus1,  0);
3535     __ bge    (CCR0,      LOOP_DIAGONAL_SUM);
3536 
3537     __ bind(SKIP_DIAGONAL_SUM);
3538 
3539     // Shift back up and set low bit
3540     // Shifts the number left by 1 bit across up to out_len words. Assumes no leading zeros.
3541     // begin<primitiveLeftShift>
3542     __ cmpwi  (CCR0,      out_len,   0);
3543     __ ble    (CCR0,      SKIP_LSHIFT);
3544     __ li     (i,         0);
3545     __ lwz    (c,         0,         out);
3546     __ subi   (b,         out_len,   1);
3547     __ mtctr  (b);
3548 
3549     __ bind(LOOP_LSHIFT);
3550     __ mr     (b,         c);
3551     __ addi   (cs,        i,         4);
3552     __ lwzx   (c,         out,       cs);
3553 
3554     __ sldi   (b,         b,         1);
3555     __ srwi   (cs,        c,         31);
3556     __ orr    (b,         b,         cs);
3557     __ stwx   (b,         i,         out);
3558 
3559     __ addi   (i,         i,         4);
3560     __ bdnz   (LOOP_LSHIFT);
3561 
3562     __ sldi   (c,         out_len,   2);
3563     __ subi   (c,         c,         4);
3564     __ lwzx   (b,         out,       c);
3565     __ sldi   (b,         b,         1);
3566     __ stwx   (b,         out,       c);
3567 
3568     __ bind(SKIP_LSHIFT);
3569     // end<primitiveLeftShift>
3570 
3571     // Set low bit
3572     __ sldi   (i,         in_len,    2);
3573     __ subi   (i,         i,         4);
3574     __ lwzx   (i,         in,        i);
3575     __ sldi   (c,         out_len,   2);
3576     __ subi   (c,         c,         4);
3577     __ lwzx   (b,         out,       c);
3578 
3579     __ andi   (i,         i,         1);
3580     __ orr    (i,         b,         i);
3581 
3582     __ stwx   (i,         out,       c);
3583 
3584     // Restore non-volatile regs.
3585     current_offs = -8;
3586     __ ld(R28, current_offs, R1_SP); current_offs -= 8;
3587     __ ld(R27, current_offs, R1_SP); current_offs -= 8;
3588     __ ld(R26, current_offs, R1_SP); current_offs -= 8;
3589     __ ld(R25, current_offs, R1_SP); current_offs -= 8;
3590     __ ld(R24, current_offs, R1_SP); current_offs -= 8;
3591     __ ld(R23, current_offs, R1_SP); current_offs -= 8;
3592     __ ld(R22, current_offs, R1_SP); current_offs -= 8;
3593     __ ld(R21, current_offs, R1_SP); current_offs -= 8;
3594     __ ld(R20, current_offs, R1_SP); current_offs -= 8;
3595     __ ld(R19, current_offs, R1_SP); current_offs -= 8;
3596     __ ld(R18, current_offs, R1_SP); current_offs -= 8;
3597     __ ld(R17, current_offs, R1_SP); current_offs -= 8;
3598     __ ld(R16, current_offs, R1_SP); current_offs -= 8;
3599     __ ld(R15, current_offs, R1_SP); current_offs -= 8;
3600     __ ld(R14, current_offs, R1_SP);
3601 
3602     __ mr(ret, out);
3603     __ blr();
3604 
3605     return start;
3606   }
3607 
3608   /**
3609    * Arguments:
3610    *
3611    * Inputs:
3612    *   R3_ARG1    - int   crc
3613    *   R4_ARG2    - byte* buf
3614    *   R5_ARG3    - int   length (of buffer)
3615    *
3616    * Scratch:
3617    *   R2, R6-R12
3618    *
3619    * Output:
3620    *   R3_RET     - int   crc result
3621    */
3622   // Compute CRC32 function.
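       // For reference (this is not the generated code): the classic byte-at-a-time
       // update for the reflected CRC-32 used by java.util.zip.CRC32 is
       //   crc = table[(crc ^ b) & 0xff] ^ (crc >> 8);
       // the table-driven kernel below processes a word per step, and the vpmsum
       // variant folds larger blocks using carry-less multiplication.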
3623   address generate_CRC32_updateBytes(const char* name) {
3624     __ align(CodeEntryAlignment);
3625     StubCodeMark mark(this, "StubRoutines", name);
3626     address start = __ function_entry();  // Remember stub start address (is rtn value).
3627 
3628     const Register table   = R6;       // crc table address
3629 
3630     // arguments to kernel_crc32:
3631     const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3632     const Register data    = R4_ARG2;  // source byte array
3633     const Register dataLen = R5_ARG3;  // #bytes to process
3634 
3635     if (VM_Version::has_vpmsumb()) {
3636       const Register constants    = R2;  // constants address
3637       const Register bconstants   = R8;  // Barrett constants address
3638 
3639       const Register t0      = R9;
3640       const Register t1      = R10;
3641       const Register t2      = R11;
3642       const Register t3      = R12;
3643       const Register t4      = R7;
3644 
3645       BLOCK_COMMENT("Stub body {");
3646       assert_different_registers(crc, data, dataLen, table);
3647 
3648       StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
3649       StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
3650       StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
3651 
3652       __ kernel_crc32_1word_vpmsum(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
3653 
3654       BLOCK_COMMENT("return");
3655       __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3656       __ blr();
3657 
3658       BLOCK_COMMENT("} Stub body");
3659     } else {
3660       StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
3661       generate_CRC_updateBytes(name, table, true);
3662     }
3663 
3664     return start;
3665   }
3666 
3667 
3668   /**
3669    * Arguments:
3670    *
3671    * Inputs:
3672    *   R3_ARG1    - int   crc
3673    *   R4_ARG2    - byte* buf
3674    *   R5_ARG3    - int   length (of buffer)
3675    *
3676    * Scratch:
3677    *   R2, R6-R12
3678    *
3679    * Output:
3680    *   R3_RET     - int   crc result
3681    */
3682   // Compute CRC32C function.
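       // Same structure as the CRC32 stub above; CRC32C differs in the polynomial
       // (Castagnoli, reflected form 0x82F63B78), so the CRC32C-specific tables and
       // constants are loaded instead.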
3683   address generate_CRC32C_updateBytes(const char* name) {
3684     __ align(CodeEntryAlignment);
3685     StubCodeMark mark(this, "StubRoutines", name);
3686     address start = __ function_entry();  // Remember stub start address (is rtn value).
3687 
3688     const Register table   = R6;       // crc table address
3689 
3690     // arguments to kernel_crc32:
3691     const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3692     const Register data    = R4_ARG2;  // source byte array
3693     const Register dataLen = R5_ARG3;  // #bytes to process
3694 
3695     if (VM_Version::has_vpmsumb()) {
3696       const Register constants    = R2;  // constants address
3697       const Register bconstants   = R8;  // Barrett constants address
3698 
3699       const Register t0      = R9;
3700       const Register t1      = R10;
3701       const Register t2      = R11;
3702       const Register t3      = R12;
3703       const Register t4      = R7;
3704 
3705       BLOCK_COMMENT("Stub body {");
3706       assert_different_registers(crc, data, dataLen, table);
3707 
3708       StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
3709       StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
3710       StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
3711 
3712       __ kernel_crc32_1word_vpmsum(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
3713 
3714       BLOCK_COMMENT("return");
3715       __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3716       __ blr();
3717 
3718       BLOCK_COMMENT("} Stub body");
3719     } else {
3720       StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
3721       generate_CRC_updateBytes(name, table, false);
3722     }
3723 
3724     return start;
3725   }
3726 
3727 
3728   // Initialization
3729   void generate_initial() {
3730     // Generates all stubs and initializes the entry points
3731 
3732     // Entry points that exist on all platforms.
3733     // Note: This code could be shared among different platforms; however, the
3734     // benefit seems smaller than the drawback of having a
3735     // much more complicated generator structure. See also the comment in
3736     // stubRoutines.hpp.
3737 
3738     StubRoutines::_forward_exception_entry          = generate_forward_exception();
3739     StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
3740     StubRoutines::_catch_exception_entry            = generate_catch_exception();
3741 
3742     // Build this early so it's available for the interpreter.
3743     StubRoutines::_throw_StackOverflowError_entry   =
3744       generate_throw_exception("StackOverflowError throw_exception",
3745                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
3746     StubRoutines::_throw_delayed_StackOverflowError_entry =
3747       generate_throw_exception("delayed StackOverflowError throw_exception",
3748                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);
3749 
3750     // CRC32 Intrinsics.
3751     if (UseCRC32Intrinsics) {
3752       StubRoutines::_crc_table_adr    = (address)StubRoutines::ppc64::_crc_table;
3753       StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
3754     }
3755 
3756     // CRC32C Intrinsics.
3757     if (UseCRC32CIntrinsics) {
3758       StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
3759       StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
3760     }
3761   }
3762 
3763   void generate_all() {
3764     // Generates all stubs and initializes the entry points
3765 
3766     // These entry points require SharedInfo::stack0 to be set up in
3767     // non-core builds
3768     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
3769     // Handle IncompatibleClassChangeError in itable stubs.
3770     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
3771     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
3772 
3773     // support for verify_oop (must happen after universe_init)
3774     StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
3775 
3776     // arraycopy stubs used by compilers
3777     generate_arraycopy_stubs();
3778 
3779     // Safefetch stubs.
3780     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
3781                                                        &StubRoutines::_safefetch32_fault_pc,
3782                                                        &StubRoutines::_safefetch32_continuation_pc);
3783     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
3784                                                        &StubRoutines::_safefetchN_fault_pc,
3785                                                        &StubRoutines::_safefetchN_continuation_pc);
3786 
3787 #ifdef COMPILER2
3788     if (UseMultiplyToLenIntrinsic) {
3789       StubRoutines::_multiplyToLen = generate_multiplyToLen();
3790     }
3791 #endif
3792 
3793     if (UseSquareToLenIntrinsic) {
3794       StubRoutines::_squareToLen = generate_squareToLen();
3795     }
3796     if (UseMulAddIntrinsic) {
3797       StubRoutines::_mulAdd = generate_mulAdd();
3798     }
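         // Unlike the generated stubs above, the Montgomery intrinsics below are
         // wired to the C implementations in SharedRuntime rather than to
         // hand-written assembly.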
3799     if (UseMontgomeryMultiplyIntrinsic) {
3800       StubRoutines::_montgomeryMultiply
3801         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
3802     }
3803     if (UseMontgomerySquareIntrinsic) {
3804       StubRoutines::_montgomerySquare
3805         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
3806     }
3807 
3808     if (UseAESIntrinsics) {
3809       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
3810       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
3811     }
3812 
3813     if (UseSHA256Intrinsics) {
3814       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
3815       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
3816     }
3817     if (UseSHA512Intrinsics) {
3818       StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
3819       StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
3820     }
3821   }
3822 
3823  public:
3824   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3825     // replace the standard masm with a special one:
3826     _masm = new MacroAssembler(code);
3827     if (all) {
3828       generate_all();
3829     } else {
3830       generate_initial();
3831     }
3832   }
3833 };
3834 
3835 void StubGenerator_generate(CodeBuffer* code, bool all) {
3836   StubGenerator g(code, all);
3837 }