1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetCodeGen.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "nativeInst_ppc.hpp"
  32 #include "oops/instanceOop.hpp"
  33 #include "oops/method.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/handles.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubCodeGenerator.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "utilities/align.hpp"
  44 
  45 // Declaration and definition of StubGenerator (no .hpp file).
  46 // For a more detailed description of the stub routine structure
  47 // see the comment in stubRoutines.hpp.
  48 
  49 #define __ _masm->
  50 
  51 #ifdef PRODUCT
  52 #define BLOCK_COMMENT(str) // nothing
  53 #else
  54 #define BLOCK_COMMENT(str) __ block_comment(str)
  55 #endif
  56 
  57 #if defined(ABI_ELFv2)
  58 #define STUB_ENTRY(name) StubRoutines::name()
  59 #else
  60 #define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
  61 #endif
  62 
  63 class StubGenerator: public StubCodeGenerator {
  64  private:
  65 
  66   // Call stubs are used to call Java from C
  67   //
  68   // Arguments:
  69   //
  70   //   R3  - call wrapper address     : address
  71   //   R4  - result                   : intptr_t*
  72   //   R5  - result type              : BasicType
  73   //   R6  - method                   : Method
  74   //   R7  - frame mgr entry point    : address
  75   //   R8  - parameter block          : intptr_t*
  76   //   R9  - parameter count in words : int
  77   //   R10 - thread                   : Thread*
  78   //
  79   address generate_call_stub(address& return_address) {
  80     // Setup a new c frame, copy java arguments, call frame manager or
  81     // native_entry, and process result.
  82 
  83     StubCodeMark mark(this, "StubRoutines", "call_stub");
  84 
  85     address start = __ function_entry();
  86 
  87     // some sanity checks
  88     assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
  89     assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
  90     assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
  91     assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
  92     assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");
  93 
  94     Register r_arg_call_wrapper_addr        = R3;
  95     Register r_arg_result_addr              = R4;
  96     Register r_arg_result_type              = R5;
  97     Register r_arg_method                   = R6;
  98     Register r_arg_entry                    = R7;
  99     Register r_arg_thread                   = R10;
 100 
 101     Register r_temp                         = R24;
 102     Register r_top_of_arguments_addr        = R25;
 103     Register r_entryframe_fp                = R26;
 104 
 105     {
 106       // Stack on entry to call_stub:
 107       //
 108       //      F1      [C_FRAME]
 109       //              ...
 110 
 111       Register r_arg_argument_addr          = R8;
 112       Register r_arg_argument_count         = R9;
 113       Register r_frame_alignment_in_bytes   = R27;
 114       Register r_argument_addr              = R28;
 115       Register r_argumentcopy_addr          = R29;
 116       Register r_argument_size_in_bytes     = R30;
 117       Register r_frame_size                 = R23;
 118 
 119       Label arguments_copied;
 120 
 121       // Save LR/CR to caller's C_FRAME.
 122       __ save_LR_CR(R0);
 123 
 124       // Zero extend arg_argument_count.
 125       __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);
 126 
 127       // Save non-volatiles GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
 128       __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
 129 
 130       // Keep copy of our frame pointer (caller's SP).
 131       __ mr(r_entryframe_fp, R1_SP);
 132 
 133       BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
 134       // Push ENTRY_FRAME including arguments:
 135       //
 136       //      F0      [TOP_IJAVA_FRAME_ABI]
 137       //              alignment (optional)
 138       //              [outgoing Java arguments]
 139       //              [ENTRY_FRAME_LOCALS]
 140       //      F1      [C_FRAME]
 141       //              ...
 142 
 143       // calculate frame size
 144 
 145       // unaligned size of arguments
 146       __ sldi(r_argument_size_in_bytes,
 147                   r_arg_argument_count, Interpreter::logStackElementSize);
 148       // arguments alignment (max 1 slot)
 149       // FIXME: use round_to() here
 150       __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
 151       __ sldi(r_frame_alignment_in_bytes,
 152               r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
 153 
 154       // size = unaligned size of arguments + top abi's size
 155       __ addi(r_frame_size, r_argument_size_in_bytes,
 156               frame::top_ijava_frame_abi_size);
 157       // size += arguments alignment
 158       __ add(r_frame_size,
 159              r_frame_size, r_frame_alignment_in_bytes);
 160       // size += size of call_stub locals
 161       __ addi(r_frame_size,
 162               r_frame_size, frame::entry_frame_locals_size);
 163 
 164       // push ENTRY_FRAME
 165       __ push_frame(r_frame_size, r_temp);
 166 
 167       // initialize call_stub locals (step 1)
 168       __ std(r_arg_call_wrapper_addr,
 169              _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
 170       __ std(r_arg_result_addr,
 171              _entry_frame_locals_neg(result_address), r_entryframe_fp);
 172       __ std(r_arg_result_type,
 173              _entry_frame_locals_neg(result_type), r_entryframe_fp);
 174       // we will save arguments_tos_address later
 175 
 176 
 177       BLOCK_COMMENT("Copy Java arguments");
 178       // copy Java arguments
 179 
 180       // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
 181       // FIXME: why not simply use SP+frame::top_ijava_frame_size?
 182       __ addi(r_top_of_arguments_addr,
 183               R1_SP, frame::top_ijava_frame_abi_size);
 184       __ add(r_top_of_arguments_addr,
 185              r_top_of_arguments_addr, r_frame_alignment_in_bytes);
 186 
 187       // any arguments to copy?
 188       __ cmpdi(CCR0, r_arg_argument_count, 0);
 189       __ beq(CCR0, arguments_copied);
 190 
 191       // prepare loop and copy arguments in reverse order
 192       {
 193         // init CTR with arg_argument_count
 194         __ mtctr(r_arg_argument_count);
 195 
 196         // let r_argumentcopy_addr point to last outgoing Java arguments P
 197         __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);
 198 
 199         // let r_argument_addr point to last incoming java argument
 200         __ add(r_argument_addr,
 201                    r_arg_argument_addr, r_argument_size_in_bytes);
 202         __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
 203 
 204         // now loop while CTR > 0 and copy arguments
 205         {
 206           Label next_argument;
 207           __ bind(next_argument);
 208 
 209           __ ld(r_temp, 0, r_argument_addr);
 210           // argument_addr--;
 211           __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
 212           __ std(r_temp, 0, r_argumentcopy_addr);
 213           // argumentcopy_addr++;
 214           __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);
 215 
 216           __ bdnz(next_argument);
 217         }
 218       }
 219 
 220       // Arguments copied, continue.
 221       __ bind(arguments_copied);
 222     }
 223 
 224     {
 225       BLOCK_COMMENT("Call frame manager or native entry.");
 226       // Call frame manager or native entry.
 227       Register r_new_arg_entry = R14;
 228       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
 229                                  r_arg_method, r_arg_thread);
 230 
 231       __ mr(r_new_arg_entry, r_arg_entry);
 232 
 233       // Register state on entry to frame manager / native entry:
 234       //
 235       //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
 236       //   R19_method  -  Method
 237       //   R16_thread  -  JavaThread*
 238 
 239       // Tos must point to last argument - element_size.
 240       const Register tos = R15_esp;
 241 
 242       __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
 243 
 244       // initialize call_stub locals (step 2)
 245       // now save tos as arguments_tos_address
 246       __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);
 247 
 248       // load argument registers for call
 249       __ mr(R19_method, r_arg_method);
 250       __ mr(R16_thread, r_arg_thread);
 251       assert(tos != r_arg_method, "trashed r_arg_method");
 252       assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
 253 
 254       // Set R15_prev_state to 0 for simplifying checks in callee.
 255       __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
 256       // Stack on entry to frame manager / native entry:
 257       //
 258       //      F0      [TOP_IJAVA_FRAME_ABI]
 259       //              alignment (optional)
 260       //              [outgoing Java arguments]
 261       //              [ENTRY_FRAME_LOCALS]
 262       //      F1      [C_FRAME]
 263       //              ...
 264       //
 265 
 266       // global toc register
 267       __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
 268       // Remember the senderSP so we interpreter can pop c2i arguments off of the stack
 269       // when called via a c2i.
 270 
 271       // Pass initial_caller_sp to framemanager.
 272       __ mr(R21_tmp1, R1_SP);
 273 
 274       // Do a light-weight C-call here, r_new_arg_entry holds the address
 275       // of the interpreter entry point (frame manager or native entry)
 276       // and save runtime-value of LR in return_address.
 277       assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
 278              "trashed r_new_arg_entry");
 279       return_address = __ call_stub(r_new_arg_entry);
 280     }
 281 
 282     {
 283       BLOCK_COMMENT("Returned from frame manager or native entry.");
 284       // Returned from frame manager or native entry.
 285       // Now pop frame, process result, and return to caller.
 286 
 287       // Stack on exit from frame manager / native entry:
 288       //
 289       //      F0      [ABI]
 290       //              ...
 291       //              [ENTRY_FRAME_LOCALS]
 292       //      F1      [C_FRAME]
 293       //              ...
 294       //
 295       // Just pop the topmost frame ...
 296       //
 297 
 298       Label ret_is_object;
 299       Label ret_is_long;
 300       Label ret_is_float;
 301       Label ret_is_double;
 302 
 303       Register r_entryframe_fp = R30;
 304       Register r_lr            = R7_ARG5;
 305       Register r_cr            = R8_ARG6;
 306 
 307       // Reload some volatile registers which we've spilled before the call
 308       // to frame manager / native entry.
 309       // Access all locals via frame pointer, because we know nothing about
 310       // the topmost frame's size.
 311       __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
 312       assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
 313       __ ld(r_arg_result_addr,
 314             _entry_frame_locals_neg(result_address), r_entryframe_fp);
 315       __ ld(r_arg_result_type,
 316             _entry_frame_locals_neg(result_type), r_entryframe_fp);
 317       __ ld(r_cr, _abi(cr), r_entryframe_fp);
 318       __ ld(r_lr, _abi(lr), r_entryframe_fp);
 319 
 320       // pop frame and restore non-volatiles, LR and CR
 321       __ mr(R1_SP, r_entryframe_fp);
 322       __ mtcr(r_cr);
 323       __ mtlr(r_lr);
 324 
 325       // Store result depending on type. Everything that is not
 326       // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
 327       __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
 328       __ cmpwi(CCR1, r_arg_result_type, T_LONG);
 329       __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
 330       __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
 331 
 332       // restore non-volatile registers
 333       __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
 334 
 335 
 336       // Stack on exit from call_stub:
 337       //
 338       //      0       [C_FRAME]
 339       //              ...
 340       //
 341       //  no call_stub frames left.
 342 
 343       // All non-volatiles have been restored at this point!!
 344       assert(R3_RET == R3, "R3_RET should be R3");
 345 
 346       __ beq(CCR0, ret_is_object);
 347       __ beq(CCR1, ret_is_long);
 348       __ beq(CCR5, ret_is_float);
 349       __ beq(CCR6, ret_is_double);
 350 
 351       // default:
 352       __ stw(R3_RET, 0, r_arg_result_addr);
 353       __ blr(); // return to caller
 354 
 355       // case T_OBJECT:
 356       __ bind(ret_is_object);
 357       __ std(R3_RET, 0, r_arg_result_addr);
 358       __ blr(); // return to caller
 359 
 360       // case T_LONG:
 361       __ bind(ret_is_long);
 362       __ std(R3_RET, 0, r_arg_result_addr);
 363       __ blr(); // return to caller
 364 
 365       // case T_FLOAT:
 366       __ bind(ret_is_float);
 367       __ stfs(F1_RET, 0, r_arg_result_addr);
 368       __ blr(); // return to caller
 369 
 370       // case T_DOUBLE:
 371       __ bind(ret_is_double);
 372       __ stfd(F1_RET, 0, r_arg_result_addr);
 373       __ blr(); // return to caller
 374     }
 375 
 376     return start;
 377   }
 378 
 379   // Return point for a Java call if there's an exception thrown in
 380   // Java code.  The exception is caught and transformed into a
 381   // pending exception stored in JavaThread that can be tested from
 382   // within the VM.
 383   //
 384   address generate_catch_exception() {
 385     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 386 
 387     address start = __ pc();
 388 
 389     // Registers alive
 390     //
 391     //  R16_thread
 392     //  R3_ARG1 - address of pending exception
 393     //  R4_ARG2 - return address in call stub
 394 
 395     const Register exception_file = R21_tmp1;
 396     const Register exception_line = R22_tmp2;
 397 
 398     __ load_const(exception_file, (void*)__FILE__);
 399     __ load_const(exception_line, (void*)__LINE__);
 400 
 401     __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
 402     // store into `char *'
 403     __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
 404     // store into `int'
 405     __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);
 406 
 407     // complete return to VM
 408     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
 409 
 410     __ mtlr(R4_ARG2);
 411     // continue in call stub
 412     __ blr();
 413 
 414     return start;
 415   }
 416 
 417   // Continuation point for runtime calls returning with a pending
 418   // exception.  The pending exception check happened in the runtime
 419   // or native call stub.  The pending exception in Thread is
 420   // converted into a Java-level exception.
 421   //
 422   // Read:
 423   //
 424   //   LR:     The pc the runtime library callee wants to return to.
 425   //           Since the exception occurred in the callee, the return pc
 426   //           from the point of view of Java is the exception pc.
 427   //   thread: Needed for method handles.
 428   //
 429   // Invalidate:
 430   //
 431   //   volatile registers (except below).
 432   //
 433   // Update:
 434   //
 435   //   R4_ARG2: exception
 436   //
 437   // (LR is unchanged and is live out).
 438   //
 439   address generate_forward_exception() {
 440     StubCodeMark mark(this, "StubRoutines", "forward_exception");
 441     address start = __ pc();
 442 
 443 #if !defined(PRODUCT)
 444     if (VerifyOops) {
 445       // Get pending exception oop.
 446       __ ld(R3_ARG1,
 447                 in_bytes(Thread::pending_exception_offset()),
 448                 R16_thread);
 449       // Make sure that this code is only executed if there is a pending exception.
 450       {
 451         Label L;
 452         __ cmpdi(CCR0, R3_ARG1, 0);
 453         __ bne(CCR0, L);
 454         __ stop("StubRoutines::forward exception: no pending exception (1)");
 455         __ bind(L);
 456       }
 457       __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
 458     }
 459 #endif
 460 
 461     // Save LR/CR and copy exception pc (LR) into R4_ARG2.
 462     __ save_LR_CR(R4_ARG2);
 463     __ push_frame_reg_args(0, R0);
 464     // Find exception handler.
 465     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
 466                      SharedRuntime::exception_handler_for_return_address),
 467                     R16_thread,
 468                     R4_ARG2);
 469     // Copy handler's address.
 470     __ mtctr(R3_RET);
 471     __ pop_frame();
 472     __ restore_LR_CR(R0);
 473 
 474     // Set up the arguments for the exception handler:
 475     //  - R3_ARG1: exception oop
 476     //  - R4_ARG2: exception pc.
 477 
 478     // Load pending exception oop.
 479     __ ld(R3_ARG1,
 480               in_bytes(Thread::pending_exception_offset()),
 481               R16_thread);
 482 
 483     // The exception pc is the return address in the caller.
 484     // Must load it into R4_ARG2.
 485     __ mflr(R4_ARG2);
 486 
 487 #ifdef ASSERT
 488     // Make sure exception is set.
 489     {
 490       Label L;
 491       __ cmpdi(CCR0, R3_ARG1, 0);
 492       __ bne(CCR0, L);
 493       __ stop("StubRoutines::forward exception: no pending exception (2)");
 494       __ bind(L);
 495     }
 496 #endif
 497 
 498     // Clear the pending exception.
 499     __ li(R0, 0);
 500     __ std(R0,
 501                in_bytes(Thread::pending_exception_offset()),
 502                R16_thread);
 503     // Jump to exception handler.
 504     __ bctr();
 505 
 506     return start;
 507   }
 508 
 509 #undef __
 510 #define __ masm->
 511   // Continuation point for throwing of implicit exceptions that are
 512   // not handled in the current activation. Fabricates an exception
 513   // oop and initiates normal exception dispatching in this
 514   // frame. Only callee-saved registers are preserved (through the
 515   // normal register window / RegisterMap handling).  If the compiler
 516   // needs all registers to be preserved between the fault point and
 517   // the exception handler then it must assume responsibility for that
 518   // in AbstractCompiler::continuation_for_implicit_null_exception or
 519   // continuation_for_implicit_division_by_zero_exception. All other
 520   // implicit exceptions (e.g., NullPointerException or
 521   // AbstractMethodError on entry) are either at call sites or
 522   // otherwise assume that stack unwinding will be initiated, so
 523   // caller saved registers were assumed volatile in the compiler.
 524   //
 525   // Note that we generate only this stub into a RuntimeStub, because
 526   // it needs to be properly traversed and ignored during GC, so we
 527   // change the meaning of the "__" macro within this method.
 528   //
 529   // Note: the routine set_pc_not_at_call_for_caller in
 530   // SharedRuntime.cpp requires that this code be generated into a
 531   // RuntimeStub.
 532   address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
 533                                    Register arg1 = noreg, Register arg2 = noreg) {
 534     CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
 535     MacroAssembler* masm = new MacroAssembler(&code);
 536 
 537     OopMapSet* oop_maps  = new OopMapSet();
 538     int frame_size_in_bytes = frame::abi_reg_args_size;
 539     OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
 540 
 541     address start = __ pc();
 542 
 543     __ save_LR_CR(R11_scratch1);
 544 
 545     // Push a frame.
 546     __ push_frame_reg_args(0, R11_scratch1);
 547 
 548     address frame_complete_pc = __ pc();
 549 
 550     if (restore_saved_exception_pc) {
 551       __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
 552     }
 553 
 554     // Note that we always have a runtime stub frame on the top of
 555     // stack by this point. Remember the offset of the instruction
 556     // whose address will be moved to R11_scratch1.
 557     address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
 558 
 559     __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
 560 
 561     __ mr(R3_ARG1, R16_thread);
 562     if (arg1 != noreg) {
 563       __ mr(R4_ARG2, arg1);
 564     }
 565     if (arg2 != noreg) {
 566       __ mr(R5_ARG3, arg2);
 567     }
 568 #if defined(ABI_ELFv2)
 569     __ call_c(runtime_entry, relocInfo::none);
 570 #else
 571     __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
 572 #endif
 573 
 574     // Set an oopmap for the call site.
 575     oop_maps->add_gc_map((int)(gc_map_pc - start), map);
 576 
 577     __ reset_last_Java_frame();
 578 
 579 #ifdef ASSERT
 580     // Make sure that this code is only executed if there is a pending
 581     // exception.
 582     {
 583       Label L;
 584       __ ld(R0,
 585                 in_bytes(Thread::pending_exception_offset()),
 586                 R16_thread);
 587       __ cmpdi(CCR0, R0, 0);
 588       __ bne(CCR0, L);
 589       __ stop("StubRoutines::throw_exception: no pending exception");
 590       __ bind(L);
 591     }
 592 #endif
 593 
 594     // Pop frame.
 595     __ pop_frame();
 596 
 597     __ restore_LR_CR(R11_scratch1);
 598 
 599     __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
 600     __ mtctr(R11_scratch1);
 601     __ bctr();
 602 
 603     // Create runtime stub with OopMap.
 604     RuntimeStub* stub =
 605       RuntimeStub::new_runtime_stub(name, &code,
 606                                     /*frame_complete=*/ (int)(frame_complete_pc - start),
 607                                     frame_size_in_bytes/wordSize,
 608                                     oop_maps,
 609                                     false);
 610     return stub->entry_point();
 611   }
 612 #undef __
 613 #define __ _masm->
 614 
 615 
 616   // Support for void zero_words_aligned8(HeapWord* to, size_t count)
 617   //
 618   // Arguments:
 619   //   to:
 620   //   count:
 621   //
 622   // Destroys:
 623   //
 624   address generate_zero_words_aligned8() {
 625     StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
 626 
 627     // Implemented as in ClearArray.
 628     address start = __ function_entry();
 629 
 630     Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
 631     Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
 632     Register tmp1_reg       = R5_ARG3;
 633     Register tmp2_reg       = R6_ARG4;
 634     Register zero_reg       = R7_ARG5;
 635 
 636     // Procedure for large arrays (uses data cache block zero instruction).
 637     Label dwloop, fast, fastloop, restloop, lastdword, done;
 638     int cl_size = VM_Version::L1_data_cache_line_size();
 639     int cl_dwords = cl_size >> 3;
 640     int cl_dwordaddr_bits = exact_log2(cl_dwords);
 641     int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
 642 
 643     // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
 644     __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
 645     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
 646     __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
 647     __ load_const_optimized(zero_reg, 0L);      // Use as zero register.
 648 
 649     __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
 650     __ beq(CCR0, lastdword);                    // size <= 1
 651     __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
 652     __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
 653     __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000
 654 
 655     __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
 656     __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.
 657 
 658     __ beq(CCR0, fast);                         // already 128byte aligned
 659     __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
 660     __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)
 661 
 662     // Clear in first cache line dword-by-dword if not already 128byte aligned.
 663     __ bind(dwloop);
 664       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
 665       __ addi(base_ptr_reg, base_ptr_reg, 8);
 666     __ bdnz(dwloop);
 667 
 668     // clear 128byte blocks
 669     __ bind(fast);
 670     __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
 671     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even
 672 
 673     __ mtctr(tmp1_reg);                         // load counter
 674     __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
 675     __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords
 676 
 677     __ bind(fastloop);
 678       __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
 679       __ addi(base_ptr_reg, base_ptr_reg, cl_size);
 680     __ bdnz(fastloop);
 681 
 682     //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
 683     __ beq(CCR0, lastdword);                    // rest<=1
 684     __ mtctr(tmp1_reg);                         // load counter
 685 
 686     // Clear rest.
 687     __ bind(restloop);
 688       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
 689       __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
 690       __ addi(base_ptr_reg, base_ptr_reg, 16);
 691     __ bdnz(restloop);
 692 
 693     __ bind(lastdword);
 694     __ beq(CCR1, done);
 695     __ std(zero_reg, 0, base_ptr_reg);
 696     __ bind(done);
 697     __ blr();                                   // return
 698 
 699     return start;
 700   }
 701 
 702 #if !defined(PRODUCT)
 703   // Wrapper which calls oopDesc::is_oop_or_null()
 704   // Only called by MacroAssembler::verify_oop
 705   static void verify_oop_helper(const char* message, oop o) {
 706     if (!oopDesc::is_oop_or_null(o)) {
 707       fatal("%s", message);
 708     }
 709     ++ StubRoutines::_verify_oop_count;
 710   }
 711 #endif
 712 
 713   // Return address of code to be called from code generated by
 714   // MacroAssembler::verify_oop.
 715   //
 716   // Don't generate, rather use C++ code.
 717   address generate_verify_oop() {
 718     // this is actually a `FunctionDescriptor*'.
 719     address start = 0;
 720 
 721 #if !defined(PRODUCT)
 722     start = CAST_FROM_FN_PTR(address, verify_oop_helper);
 723 #endif
 724 
 725     return start;
 726   }
 727 
 728   // Fairer handling of safepoints for native methods.
 729   //
 730   // Generate code which reads from the polling page. This special handling is needed as the
 731   // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
 732   // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
 733   // to read from the safepoint polling page.
 734   address generate_load_from_poll() {
 735     StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
 736     address start = __ function_entry();
 737     __ unimplemented("StubRoutines::verify_oop", 95);  // TODO PPC port
 738     return start;
 739   }
 740 
 741   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
 742   //
 743   // The code is implemented(ported from sparc) as we believe it benefits JVM98, however
 744   // tracing(-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all!
 745   //
 746   // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
 747   // for turning on loop predication optimization, and hence the behavior of "array range check"
 748   // and "loop invariant check" could be influenced, which potentially boosted JVM98.
 749   //
 750   // Generate stub for disjoint short fill. If "aligned" is true, the
 751   // "to" address is assumed to be heapword aligned.
 752   //
 753   // Arguments for generated stub:
 754   //   to:    R3_ARG1
 755   //   value: R4_ARG2
 756   //   count: R5_ARG3 treated as signed
 757   //
 758   address generate_fill(BasicType t, bool aligned, const char* name) {
 759     StubCodeMark mark(this, "StubRoutines", name);
 760     address start = __ function_entry();
 761 
 762     const Register to    = R3_ARG1;   // source array address
 763     const Register value = R4_ARG2;   // fill value
 764     const Register count = R5_ARG3;   // elements count
 765     const Register temp  = R6_ARG4;   // temp register
 766 
 767     //assert_clean_int(count, O3);    // Make sure 'count' is clean int.
 768 
 769     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
 770     Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;
 771 
 772     int shift = -1;
 773     switch (t) {
 774        case T_BYTE:
 775         shift = 2;
 776         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
 777         __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
 778         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 779         __ blt(CCR0, L_fill_elements);
 780         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
 781         break;
 782        case T_SHORT:
 783         shift = 1;
 784         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
 785         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
 786         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 787         __ blt(CCR0, L_fill_elements);
 788         break;
 789       case T_INT:
 790         shift = 0;
 791         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 792         __ blt(CCR0, L_fill_4_bytes);
 793         break;
 794       default: ShouldNotReachHere();
 795     }
 796 
 797     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
 798       // Align source address at 4 bytes address boundary.
 799       if (t == T_BYTE) {
 800         // One byte misalignment happens only for byte arrays.
 801         __ andi_(temp, to, 1);
 802         __ beq(CCR0, L_skip_align1);
 803         __ stb(value, 0, to);
 804         __ addi(to, to, 1);
 805         __ addi(count, count, -1);
 806         __ bind(L_skip_align1);
 807       }
 808       // Two bytes misalignment happens only for byte and short (char) arrays.
 809       __ andi_(temp, to, 2);
 810       __ beq(CCR0, L_skip_align2);
 811       __ sth(value, 0, to);
 812       __ addi(to, to, 2);
 813       __ addi(count, count, -(1 << (shift - 1)));
 814       __ bind(L_skip_align2);
 815     }
 816 
 817     if (!aligned) {
 818       // Align to 8 bytes, we know we are 4 byte aligned to start.
 819       __ andi_(temp, to, 7);
 820       __ beq(CCR0, L_fill_32_bytes);
 821       __ stw(value, 0, to);
 822       __ addi(to, to, 4);
 823       __ addi(count, count, -(1 << shift));
 824       __ bind(L_fill_32_bytes);
 825     }
 826 
 827     __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
 828     // Clone bytes int->long as above.
 829     __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit
 830 
 831     Label L_check_fill_8_bytes;
 832     // Fill 32-byte chunks.
 833     __ subf_(count, temp, count);
 834     __ blt(CCR0, L_check_fill_8_bytes);
 835 
 836     Label L_fill_32_bytes_loop;
 837     __ align(32);
 838     __ bind(L_fill_32_bytes_loop);
 839 
 840     __ std(value, 0, to);
 841     __ std(value, 8, to);
 842     __ subf_(count, temp, count);           // Update count.
 843     __ std(value, 16, to);
 844     __ std(value, 24, to);
 845 
 846     __ addi(to, to, 32);
 847     __ bge(CCR0, L_fill_32_bytes_loop);
 848 
 849     __ bind(L_check_fill_8_bytes);
 850     __ add_(count, temp, count);
 851     __ beq(CCR0, L_exit);
 852     __ addic_(count, count, -(2 << shift));
 853     __ blt(CCR0, L_fill_4_bytes);
 854 
 855     //
 856     // Length is too short, just fill 8 bytes at a time.
 857     //
 858     Label L_fill_8_bytes_loop;
 859     __ bind(L_fill_8_bytes_loop);
 860     __ std(value, 0, to);
 861     __ addic_(count, count, -(2 << shift));
 862     __ addi(to, to, 8);
 863     __ bge(CCR0, L_fill_8_bytes_loop);
 864 
 865     // Fill trailing 4 bytes.
 866     __ bind(L_fill_4_bytes);
 867     __ andi_(temp, count, 1<<shift);
 868     __ beq(CCR0, L_fill_2_bytes);
 869 
 870     __ stw(value, 0, to);
 871     if (t == T_BYTE || t == T_SHORT) {
 872       __ addi(to, to, 4);
 873       // Fill trailing 2 bytes.
 874       __ bind(L_fill_2_bytes);
 875       __ andi_(temp, count, 1<<(shift-1));
 876       __ beq(CCR0, L_fill_byte);
 877       __ sth(value, 0, to);
 878       if (t == T_BYTE) {
 879         __ addi(to, to, 2);
 880         // Fill trailing byte.
 881         __ bind(L_fill_byte);
 882         __ andi_(count, count, 1);
 883         __ beq(CCR0, L_exit);
 884         __ stb(value, 0, to);
 885       } else {
 886         __ bind(L_fill_byte);
 887       }
 888     } else {
 889       __ bind(L_fill_2_bytes);
 890     }
 891     __ bind(L_exit);
 892     __ blr();
 893 
 894     // Handle copies less than 8 bytes. Int is handled elsewhere.
 895     if (t == T_BYTE) {
 896       __ bind(L_fill_elements);
 897       Label L_fill_2, L_fill_4;
 898       __ andi_(temp, count, 1);
 899       __ beq(CCR0, L_fill_2);
 900       __ stb(value, 0, to);
 901       __ addi(to, to, 1);
 902       __ bind(L_fill_2);
 903       __ andi_(temp, count, 2);
 904       __ beq(CCR0, L_fill_4);
 905       __ stb(value, 0, to);
 906       __ stb(value, 0, to);
 907       __ addi(to, to, 2);
 908       __ bind(L_fill_4);
 909       __ andi_(temp, count, 4);
 910       __ beq(CCR0, L_exit);
 911       __ stb(value, 0, to);
 912       __ stb(value, 1, to);
 913       __ stb(value, 2, to);
 914       __ stb(value, 3, to);
 915       __ blr();
 916     }
 917 
 918     if (t == T_SHORT) {
 919       Label L_fill_2;
 920       __ bind(L_fill_elements);
 921       __ andi_(temp, count, 1);
 922       __ beq(CCR0, L_fill_2);
 923       __ sth(value, 0, to);
 924       __ addi(to, to, 2);
 925       __ bind(L_fill_2);
 926       __ andi_(temp, count, 2);
 927       __ beq(CCR0, L_exit);
 928       __ sth(value, 0, to);
 929       __ sth(value, 2, to);
 930       __ blr();
 931     }
 932     return start;
 933   }
 934 
 935   inline void assert_positive_int(Register count) {
 936 #ifdef ASSERT
 937     __ srdi_(R0, count, 31);
 938     __ asm_assert_eq("missing zero extend", 0xAFFE);
 939 #endif
 940   }
 941 
 942   // Generate overlap test for array copy stubs.
 943   //
 944   // Input:
 945   //   R3_ARG1    -  from
 946   //   R4_ARG2    -  to
 947   //   R5_ARG3    -  element count
 948   //
 949   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
 950     Register tmp1 = R6_ARG4;
 951     Register tmp2 = R7_ARG5;
 952 
 953     assert_positive_int(R5_ARG3);
 954 
 955     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
 956     __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
 957     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
 958     __ cmpld(CCR1, tmp1, tmp2);
 959     __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
 960     // Overlaps if Src before dst and distance smaller than size.
 961     // Branch to forward copy routine otherwise (within range of 32kB).
 962     __ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target);
 963 
 964     // need to copy backwards
 965   }
 966 
 967   // The guideline in the implementations of generate_disjoint_xxx_copy
 968   // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
 969   // single instructions, but to avoid alignment interrupts (see subsequent
 970   // comment). Furthermore, we try to minimize misaligned access, even
 971   // though they cause no alignment interrupt.
 972   //
 973   // In Big-Endian mode, the PowerPC architecture requires implementations to
 974   // handle automatically misaligned integer halfword and word accesses,
 975   // word-aligned integer doubleword accesses, and word-aligned floating-point
 976   // accesses. Other accesses may or may not generate an Alignment interrupt
 977   // depending on the implementation.
 978   // Alignment interrupt handling may require on the order of hundreds of cycles,
 979   // so every effort should be made to avoid misaligned memory values.
 980   //
 981   //
 982   // Generate stub for disjoint byte copy.  If "aligned" is true, the
 983   // "from" and "to" addresses are assumed to be heapword aligned.
 984   //
 985   // Arguments for generated stub:
 986   //      from:  R3_ARG1
 987   //      to:    R4_ARG2
 988   //      count: R5_ARG3 treated as signed
 989   //
 990   address generate_disjoint_byte_copy(bool aligned, const char * name) {
 991     StubCodeMark mark(this, "StubRoutines", name);
 992     address start = __ function_entry();
 993     assert_positive_int(R5_ARG3);
 994 
 995     Register tmp1 = R6_ARG4;
 996     Register tmp2 = R7_ARG5;
 997     Register tmp3 = R8_ARG6;
 998     Register tmp4 = R9_ARG7;
 999 
1000     VectorSRegister tmp_vsr1  = VSR1;
1001     VectorSRegister tmp_vsr2  = VSR2;
1002 
1003     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10;
1004 
1005     // Don't try anything fancy if arrays don't have many elements.
1006     __ li(tmp3, 0);
1007     __ cmpwi(CCR0, R5_ARG3, 17);
1008     __ ble(CCR0, l_6); // copy 4 at a time
1009 
1010     if (!aligned) {
1011       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1012       __ andi_(tmp1, tmp1, 3);
1013       __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
1014 
1015       // Copy elements if necessary to align to 4 bytes.
1016       __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
1017       __ andi_(tmp1, tmp1, 3);
1018       __ beq(CCR0, l_2);
1019 
1020       __ subf(R5_ARG3, tmp1, R5_ARG3);
1021       __ bind(l_9);
1022       __ lbz(tmp2, 0, R3_ARG1);
1023       __ addic_(tmp1, tmp1, -1);
1024       __ stb(tmp2, 0, R4_ARG2);
1025       __ addi(R3_ARG1, R3_ARG1, 1);
1026       __ addi(R4_ARG2, R4_ARG2, 1);
1027       __ bne(CCR0, l_9);
1028 
1029       __ bind(l_2);
1030     }
1031 
1032     // copy 8 elements at a time
1033     __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
1034     __ andi_(tmp1, tmp2, 7);
1035     __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8
1036 
1037     // copy a 2-element word if necessary to align to 8 bytes
1038     __ andi_(R0, R3_ARG1, 7);
1039     __ beq(CCR0, l_7);
1040 
1041     __ lwzx(tmp2, R3_ARG1, tmp3);
1042     __ addi(R5_ARG3, R5_ARG3, -4);
1043     __ stwx(tmp2, R4_ARG2, tmp3);
1044     { // FasterArrayCopy
1045       __ addi(R3_ARG1, R3_ARG1, 4);
1046       __ addi(R4_ARG2, R4_ARG2, 4);
1047     }
1048     __ bind(l_7);
1049 
1050     { // FasterArrayCopy
1051       __ cmpwi(CCR0, R5_ARG3, 31);
1052       __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain
1053 
1054       __ srdi(tmp1, R5_ARG3, 5);
1055       __ andi_(R5_ARG3, R5_ARG3, 31);
1056       __ mtctr(tmp1);
1057 
1058      if (!VM_Version::has_vsx()) {
1059 
1060       __ bind(l_8);
1061       // Use unrolled version for mass copying (copy 32 elements a time)
1062       // Load feeding store gets zero latency on Power6, however not on Power5.
1063       // Therefore, the following sequence is made for the good of both.
1064       __ ld(tmp1, 0, R3_ARG1);
1065       __ ld(tmp2, 8, R3_ARG1);
1066       __ ld(tmp3, 16, R3_ARG1);
1067       __ ld(tmp4, 24, R3_ARG1);
1068       __ std(tmp1, 0, R4_ARG2);
1069       __ std(tmp2, 8, R4_ARG2);
1070       __ std(tmp3, 16, R4_ARG2);
1071       __ std(tmp4, 24, R4_ARG2);
1072       __ addi(R3_ARG1, R3_ARG1, 32);
1073       __ addi(R4_ARG2, R4_ARG2, 32);
1074       __ bdnz(l_8);
1075 
1076     } else { // Processor supports VSX, so use it to mass copy.
1077 
1078       // Prefetch the data into the L2 cache.
1079       __ dcbt(R3_ARG1, 0);
1080 
1081       // If supported set DSCR pre-fetch to deepest.
1082       if (VM_Version::has_mfdscr()) {
1083         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1084         __ mtdscr(tmp2);
1085       }
1086 
1087       __ li(tmp1, 16);
1088 
1089       // Backbranch target aligned to 32-byte. Not 16-byte align as
1090       // loop contains < 8 instructions that fit inside a single
1091       // i-cache sector.
1092       __ align(32);
1093 
1094       __ bind(l_10);
1095       // Use loop with VSX load/store instructions to
1096       // copy 32 elements a time.
1097       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1098       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1099       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1100       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1101       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1102       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dsc+=32
1103       __ bdnz(l_10);                       // Dec CTR and loop if not zero.
1104 
1105       // Restore DSCR pre-fetch value.
1106       if (VM_Version::has_mfdscr()) {
1107         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1108         __ mtdscr(tmp2);
1109       }
1110 
1111     } // VSX
1112    } // FasterArrayCopy
1113 
1114     __ bind(l_6);
1115 
1116     // copy 4 elements at a time
1117     __ cmpwi(CCR0, R5_ARG3, 4);
1118     __ blt(CCR0, l_1);
1119     __ srdi(tmp1, R5_ARG3, 2);
1120     __ mtctr(tmp1); // is > 0
1121     __ andi_(R5_ARG3, R5_ARG3, 3);
1122 
1123     { // FasterArrayCopy
1124       __ addi(R3_ARG1, R3_ARG1, -4);
1125       __ addi(R4_ARG2, R4_ARG2, -4);
1126       __ bind(l_3);
1127       __ lwzu(tmp2, 4, R3_ARG1);
1128       __ stwu(tmp2, 4, R4_ARG2);
1129       __ bdnz(l_3);
1130       __ addi(R3_ARG1, R3_ARG1, 4);
1131       __ addi(R4_ARG2, R4_ARG2, 4);
1132     }
1133 
1134     // do single element copy
1135     __ bind(l_1);
1136     __ cmpwi(CCR0, R5_ARG3, 0);
1137     __ beq(CCR0, l_4);
1138 
1139     { // FasterArrayCopy
1140       __ mtctr(R5_ARG3);
1141       __ addi(R3_ARG1, R3_ARG1, -1);
1142       __ addi(R4_ARG2, R4_ARG2, -1);
1143 
1144       __ bind(l_5);
1145       __ lbzu(tmp2, 1, R3_ARG1);
1146       __ stbu(tmp2, 1, R4_ARG2);
1147       __ bdnz(l_5);
1148     }
1149 
1150     __ bind(l_4);
1151     __ li(R3_RET, 0); // return 0
1152     __ blr();
1153 
1154     return start;
1155   }
1156 
1157   // Generate stub for conjoint byte copy.  If "aligned" is true, the
1158   // "from" and "to" addresses are assumed to be heapword aligned.
1159   //
1160   // Arguments for generated stub:
1161   //      from:  R3_ARG1
1162   //      to:    R4_ARG2
1163   //      count: R5_ARG3 treated as signed
1164   //
1165   address generate_conjoint_byte_copy(bool aligned, const char * name) {
1166     StubCodeMark mark(this, "StubRoutines", name);
1167     address start = __ function_entry();
1168     assert_positive_int(R5_ARG3);
1169 
1170     Register tmp1 = R6_ARG4;
1171     Register tmp2 = R7_ARG5;
1172     Register tmp3 = R8_ARG6;
1173 
1174     address nooverlap_target = aligned ?
1175       STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
1176       STUB_ENTRY(jbyte_disjoint_arraycopy);
1177 
1178     array_overlap_test(nooverlap_target, 0);
1179     // Do reverse copy. We assume the case of actual overlap is rare enough
1180     // that we don't have to optimize it.
1181     Label l_1, l_2;
1182 
1183     __ b(l_2);
1184     __ bind(l_1);
1185     __ stbx(tmp1, R4_ARG2, R5_ARG3);
1186     __ bind(l_2);
1187     __ addic_(R5_ARG3, R5_ARG3, -1);
1188     __ lbzx(tmp1, R3_ARG1, R5_ARG3);
1189     __ bge(CCR0, l_1);
1190 
1191     __ li(R3_RET, 0); // return 0
1192     __ blr();
1193 
1194     return start;
1195   }
1196 
1197   // Generate stub for disjoint short copy.  If "aligned" is true, the
1198   // "from" and "to" addresses are assumed to be heapword aligned.
1199   //
1200   // Arguments for generated stub:
1201   //      from:  R3_ARG1
1202   //      to:    R4_ARG2
1203   //  elm.count: R5_ARG3 treated as signed
1204   //
1205   // Strategy for aligned==true:
1206   //
1207   //  If length <= 9:
1208   //     1. copy 2 elements at a time (l_6)
1209   //     2. copy last element if original element count was odd (l_1)
1210   //
1211   //  If length > 9:
1212   //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
1213   //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
1214   //     3. copy last element if one was left in step 2. (l_1)
1215   //
1216   //
1217   // Strategy for aligned==false:
1218   //
1219   //  If length <= 9: same as aligned==true case, but NOTE: load/stores
1220   //                  can be unaligned (see comment below)
1221   //
1222   //  If length > 9:
1223   //     1. continue with step 6. if the alignment of from and to mod 4
1224   //        is different.
1225   //     2. align from and to to 4 bytes by copying 1 element if necessary
1226   //     3. at l_2 from and to are 4 byte aligned; continue with
1227   //        5. if they cannot be aligned to 8 bytes because they have
1228   //        got different alignment mod 8.
1229   //     4. at this point we know that both, from and to, have the same
1230   //        alignment mod 8, now copy one element if necessary to get
1231   //        8 byte alignment of from and to.
1232   //     5. copy 4 elements at a time until less than 4 elements are
1233   //        left; depending on step 3. all load/stores are aligned or
1234   //        either all loads or all stores are unaligned.
1235   //     6. copy 2 elements at a time until less than 2 elements are
1236   //        left (l_6); arriving here from step 1., there is a chance
1237   //        that all accesses are unaligned.
1238   //     7. copy last element if one was left in step 6. (l_1)
1239   //
1240   //  There are unaligned data accesses using integer load/store
1241   //  instructions in this stub. POWER allows such accesses.
1242   //
1243   //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
1244   //  Chapter 2: Effect of Operand Placement on Performance) unaligned
1245   //  integer load/stores have good performance. Only unaligned
1246   //  floating point load/stores can have poor performance.
1247   //
1248   //  TODO:
1249   //
1250   //  1. check if aligning the backbranch target of loops is beneficial
1251   //
1252   address generate_disjoint_short_copy(bool aligned, const char * name) {
1253     StubCodeMark mark(this, "StubRoutines", name);
1254 
1255     Register tmp1 = R6_ARG4;
1256     Register tmp2 = R7_ARG5;
1257     Register tmp3 = R8_ARG6;
1258     Register tmp4 = R9_ARG7;
1259 
1260     VectorSRegister tmp_vsr1  = VSR1;
1261     VectorSRegister tmp_vsr2  = VSR2;
1262 
1263     address start = __ function_entry();
1264     assert_positive_int(R5_ARG3);
1265 
1266     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
1267 
1268     // don't try anything fancy if arrays don't have many elements
1269     __ li(tmp3, 0);
1270     __ cmpwi(CCR0, R5_ARG3, 9);
1271     __ ble(CCR0, l_6); // copy 2 at a time
1272 
1273     if (!aligned) {
1274       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1275       __ andi_(tmp1, tmp1, 3);
1276       __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
1277 
1278       // At this point it is guaranteed that both, from and to have the same alignment mod 4.
1279 
1280       // Copy 1 element if necessary to align to 4 bytes.
1281       __ andi_(tmp1, R3_ARG1, 3);
1282       __ beq(CCR0, l_2);
1283 
1284       __ lhz(tmp2, 0, R3_ARG1);
1285       __ addi(R3_ARG1, R3_ARG1, 2);
1286       __ sth(tmp2, 0, R4_ARG2);
1287       __ addi(R4_ARG2, R4_ARG2, 2);
1288       __ addi(R5_ARG3, R5_ARG3, -1);
1289       __ bind(l_2);
1290 
1291       // At this point the positions of both, from and to, are at least 4 byte aligned.
1292 
1293       // Copy 4 elements at a time.
1294       // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
1295       __ xorr(tmp2, R3_ARG1, R4_ARG2);
1296       __ andi_(tmp1, tmp2, 7);
1297       __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
1298 
1299       // Copy a 2-element word if necessary to align to 8 bytes.
1300       __ andi_(R0, R3_ARG1, 7);
1301       __ beq(CCR0, l_7);
1302 
1303       __ lwzx(tmp2, R3_ARG1, tmp3);
1304       __ addi(R5_ARG3, R5_ARG3, -2);
1305       __ stwx(tmp2, R4_ARG2, tmp3);
1306       { // FasterArrayCopy
1307         __ addi(R3_ARG1, R3_ARG1, 4);
1308         __ addi(R4_ARG2, R4_ARG2, 4);
1309       }
1310     }
1311 
1312     __ bind(l_7);
1313 
1314     // Copy 4 elements at a time; either the loads or the stores can
1315     // be unaligned if aligned == false.
1316 
1317     { // FasterArrayCopy
1318       __ cmpwi(CCR0, R5_ARG3, 15);
1319       __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
1320 
1321       __ srdi(tmp1, R5_ARG3, 4);
1322       __ andi_(R5_ARG3, R5_ARG3, 15);
1323       __ mtctr(tmp1);
1324 
1325       if (!VM_Version::has_vsx()) {
1326 
1327         __ bind(l_8);
1328         // Use unrolled version for mass copying (copy 16 elements a time).
1329         // Load feeding store gets zero latency on Power6, however not on Power5.
1330         // Therefore, the following sequence is made for the good of both.
1331         __ ld(tmp1, 0, R3_ARG1);
1332         __ ld(tmp2, 8, R3_ARG1);
1333         __ ld(tmp3, 16, R3_ARG1);
1334         __ ld(tmp4, 24, R3_ARG1);
1335         __ std(tmp1, 0, R4_ARG2);
1336         __ std(tmp2, 8, R4_ARG2);
1337         __ std(tmp3, 16, R4_ARG2);
1338         __ std(tmp4, 24, R4_ARG2);
1339         __ addi(R3_ARG1, R3_ARG1, 32);
1340         __ addi(R4_ARG2, R4_ARG2, 32);
1341         __ bdnz(l_8);
1342 
1343       } else { // Processor supports VSX, so use it to mass copy.
1344 
1345         // Prefetch src data into L2 cache.
1346         __ dcbt(R3_ARG1, 0);
1347 
1348         // If supported set DSCR pre-fetch to deepest.
1349         if (VM_Version::has_mfdscr()) {
1350           __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1351           __ mtdscr(tmp2);
1352         }
1353         __ li(tmp1, 16);
1354 
1355         // Backbranch target aligned to 32-byte. It's not aligned 16-byte
1356         // as loop contains < 8 instructions that fit inside a single
1357         // i-cache sector.
1358         __ align(32);
1359 
1360         __ bind(l_9);
1361         // Use loop with VSX load/store instructions to
1362         // copy 16 elements a time.
1363         __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load from src.
1364         __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst.
1365         __ lxvd2x(tmp_vsr2, R3_ARG1, tmp1);  // Load from src + 16.
1366         __ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
1367         __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32.
1368         __ addi(R4_ARG2, R4_ARG2, 32);       // Update dsc+=32.
1369         __ bdnz(l_9);                        // Dec CTR and loop if not zero.
1370 
1371         // Restore DSCR pre-fetch value.
1372         if (VM_Version::has_mfdscr()) {
1373           __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1374           __ mtdscr(tmp2);
1375         }
1376 
1377       }
1378     } // FasterArrayCopy
1379     __ bind(l_6);
1380 
1381     // copy 2 elements at a time
1382     { // FasterArrayCopy
1383       __ cmpwi(CCR0, R5_ARG3, 2);
1384       __ blt(CCR0, l_1);
1385       __ srdi(tmp1, R5_ARG3, 1);
1386       __ andi_(R5_ARG3, R5_ARG3, 1);
1387 
1388       __ addi(R3_ARG1, R3_ARG1, -4);
1389       __ addi(R4_ARG2, R4_ARG2, -4);
1390       __ mtctr(tmp1);
1391 
1392       __ bind(l_3);
1393       __ lwzu(tmp2, 4, R3_ARG1);
1394       __ stwu(tmp2, 4, R4_ARG2);
1395       __ bdnz(l_3);
1396 
1397       __ addi(R3_ARG1, R3_ARG1, 4);
1398       __ addi(R4_ARG2, R4_ARG2, 4);
1399     }
1400 
1401     // do single element copy
1402     __ bind(l_1);
1403     __ cmpwi(CCR0, R5_ARG3, 0);
1404     __ beq(CCR0, l_4);
1405 
1406     { // FasterArrayCopy
1407       __ mtctr(R5_ARG3);
1408       __ addi(R3_ARG1, R3_ARG1, -2);
1409       __ addi(R4_ARG2, R4_ARG2, -2);
1410 
1411       __ bind(l_5);
1412       __ lhzu(tmp2, 2, R3_ARG1);
1413       __ sthu(tmp2, 2, R4_ARG2);
1414       __ bdnz(l_5);
1415     }
1416     __ bind(l_4);
1417     __ li(R3_RET, 0); // return 0
1418     __ blr();
1419 
1420     return start;
1421   }
1422 
1423   // Generate stub for conjoint short copy.  If "aligned" is true, the
1424   // "from" and "to" addresses are assumed to be heapword aligned.
1425   //
1426   // Arguments for generated stub:
1427   //      from:  R3_ARG1
1428   //      to:    R4_ARG2
1429   //      count: R5_ARG3 treated as signed
1430   //
1431   address generate_conjoint_short_copy(bool aligned, const char * name) {
1432     StubCodeMark mark(this, "StubRoutines", name);
1433     address start = __ function_entry();
1434     assert_positive_int(R5_ARG3);
1435 
1436     Register tmp1 = R6_ARG4;
1437     Register tmp2 = R7_ARG5;
1438     Register tmp3 = R8_ARG6;
1439 
1440     address nooverlap_target = aligned ?
1441       STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
1442       STUB_ENTRY(jshort_disjoint_arraycopy);
1443 
1444     array_overlap_test(nooverlap_target, 1);
1445 
1446     Label l_1, l_2;
1447     __ sldi(tmp1, R5_ARG3, 1);
1448     __ b(l_2);
1449     __ bind(l_1);
1450     __ sthx(tmp2, R4_ARG2, tmp1);
1451     __ bind(l_2);
1452     __ addic_(tmp1, tmp1, -2);
1453     __ lhzx(tmp2, R3_ARG1, tmp1);
1454     __ bge(CCR0, l_1);
1455 
1456     __ li(R3_RET, 0); // return 0
1457     __ blr();
1458 
1459     return start;
1460   }
1461 
1462   // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
1463   // is true, the "from" and "to" addresses are assumed to be heapword aligned.
1464   //
1465   // Arguments:
1466   //      from:  R3_ARG1
1467   //      to:    R4_ARG2
1468   //      count: R5_ARG3 treated as signed
1469   //
1470   void generate_disjoint_int_copy_core(bool aligned) {
1471     Register tmp1 = R6_ARG4;
1472     Register tmp2 = R7_ARG5;
1473     Register tmp3 = R8_ARG6;
1474     Register tmp4 = R0;
1475 
1476     VectorSRegister tmp_vsr1  = VSR1;
1477     VectorSRegister tmp_vsr2  = VSR2;
1478 
1479     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
1480 
1481     // for short arrays, just do single element copy
1482     __ li(tmp3, 0);
1483     __ cmpwi(CCR0, R5_ARG3, 5);
1484     __ ble(CCR0, l_2);
1485 
1486     if (!aligned) {
1487         // check if arrays have same alignment mod 8.
1488         __ xorr(tmp1, R3_ARG1, R4_ARG2);
1489         __ andi_(R0, tmp1, 7);
1490         // Not the same alignment, but ld and std just need to be 4 byte aligned.
1491         __ bne(CCR0, l_4); // to OR from is 8 byte aligned -> copy 2 at a time
1492 
1493         // copy 1 element to align to and from on an 8 byte boundary
1494         __ andi_(R0, R3_ARG1, 7);
1495         __ beq(CCR0, l_4);
1496 
1497         __ lwzx(tmp2, R3_ARG1, tmp3);
1498         __ addi(R5_ARG3, R5_ARG3, -1);
1499         __ stwx(tmp2, R4_ARG2, tmp3);
1500         { // FasterArrayCopy
1501           __ addi(R3_ARG1, R3_ARG1, 4);
1502           __ addi(R4_ARG2, R4_ARG2, 4);
1503         }
1504         __ bind(l_4);
1505       }
1506 
1507     { // FasterArrayCopy
1508       __ cmpwi(CCR0, R5_ARG3, 7);
1509       __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1510 
1511       __ srdi(tmp1, R5_ARG3, 3);
1512       __ andi_(R5_ARG3, R5_ARG3, 7);
1513       __ mtctr(tmp1);
1514 
1515      if (!VM_Version::has_vsx()) {
1516 
1517       __ bind(l_6);
1518       // Use unrolled version for mass copying (copy 8 elements a time).
1519       // Load feeding store gets zero latency on power6, however not on power 5.
1520       // Therefore, the following sequence is made for the good of both.
1521       __ ld(tmp1, 0, R3_ARG1);
1522       __ ld(tmp2, 8, R3_ARG1);
1523       __ ld(tmp3, 16, R3_ARG1);
1524       __ ld(tmp4, 24, R3_ARG1);
1525       __ std(tmp1, 0, R4_ARG2);
1526       __ std(tmp2, 8, R4_ARG2);
1527       __ std(tmp3, 16, R4_ARG2);
1528       __ std(tmp4, 24, R4_ARG2);
1529       __ addi(R3_ARG1, R3_ARG1, 32);
1530       __ addi(R4_ARG2, R4_ARG2, 32);
1531       __ bdnz(l_6);
1532 
1533     } else { // Processor supports VSX, so use it to mass copy.
1534 
1535       // Prefetch the data into the L2 cache.
1536       __ dcbt(R3_ARG1, 0);
1537 
1538       // If supported set DSCR pre-fetch to deepest.
1539       if (VM_Version::has_mfdscr()) {
1540         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1541         __ mtdscr(tmp2);
1542       }
1543 
1544       __ li(tmp1, 16);
1545 
1546       // Backbranch target aligned to 32-byte. Not 16-byte align as
1547       // loop contains < 8 instructions that fit inside a single
1548       // i-cache sector.
1549       __ align(32);
1550 
1551       __ bind(l_7);
1552       // Use loop with VSX load/store instructions to
1553       // copy 8 elements a time.
1554       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1555       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1556       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1557       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1558       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1559       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dsc+=32
1560       __ bdnz(l_7);                        // Dec CTR and loop if not zero.
1561 
1562       // Restore DSCR pre-fetch value.
1563       if (VM_Version::has_mfdscr()) {
1564         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1565         __ mtdscr(tmp2);
1566       }
1567 
1568     } // VSX
1569    } // FasterArrayCopy
1570 
1571     // copy 1 element at a time
1572     __ bind(l_2);
1573     __ cmpwi(CCR0, R5_ARG3, 0);
1574     __ beq(CCR0, l_1);
1575 
1576     { // FasterArrayCopy
1577       __ mtctr(R5_ARG3);
1578       __ addi(R3_ARG1, R3_ARG1, -4);
1579       __ addi(R4_ARG2, R4_ARG2, -4);
1580 
1581       __ bind(l_3);
1582       __ lwzu(tmp2, 4, R3_ARG1);
1583       __ stwu(tmp2, 4, R4_ARG2);
1584       __ bdnz(l_3);
1585     }
1586 
1587     __ bind(l_1);
1588     return;
1589   }
1590 
1591   // Generate stub for disjoint int copy.  If "aligned" is true, the
1592   // "from" and "to" addresses are assumed to be heapword aligned.
1593   //
1594   // Arguments for generated stub:
1595   //      from:  R3_ARG1
1596   //      to:    R4_ARG2
1597   //      count: R5_ARG3 treated as signed
1598   //
1599   address generate_disjoint_int_copy(bool aligned, const char * name) {
1600     StubCodeMark mark(this, "StubRoutines", name);
1601     address start = __ function_entry();
1602     assert_positive_int(R5_ARG3);
1603     generate_disjoint_int_copy_core(aligned);
1604     __ li(R3_RET, 0); // return 0
1605     __ blr();
1606     return start;
1607   }
1608 
1609   // Generate core code for conjoint int copy (and oop copy on
1610   // 32-bit).  If "aligned" is true, the "from" and "to" addresses
1611   // are assumed to be heapword aligned.
1612   //
1613   // Arguments:
1614   //      from:  R3_ARG1
1615   //      to:    R4_ARG2
1616   //      count: R5_ARG3 treated as signed
1617   //
1618   void generate_conjoint_int_copy_core(bool aligned) {
1619     // Do reverse copy.  We assume the case of actual overlap is rare enough
1620     // that we don't have to optimize it.
1621 
1622     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
1623 
1624     Register tmp1 = R6_ARG4;
1625     Register tmp2 = R7_ARG5;
1626     Register tmp3 = R8_ARG6;
1627     Register tmp4 = R0;
1628 
1629     VectorSRegister tmp_vsr1  = VSR1;
1630     VectorSRegister tmp_vsr2  = VSR2;
1631 
1632     { // FasterArrayCopy
1633       __ cmpwi(CCR0, R5_ARG3, 0);
1634       __ beq(CCR0, l_6);
1635 
1636       __ sldi(R5_ARG3, R5_ARG3, 2);
1637       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1638       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1639       __ srdi(R5_ARG3, R5_ARG3, 2);
1640 
1641       if (!aligned) {
1642         // check if arrays have same alignment mod 8.
1643         __ xorr(tmp1, R3_ARG1, R4_ARG2);
1644         __ andi_(R0, tmp1, 7);
1645         // Not the same alignment, but ld and std just need to be 4 byte aligned.
1646         __ bne(CCR0, l_7); // to OR from is 8 byte aligned -> copy 2 at a time
1647 
1648         // copy 1 element to align to and from on an 8 byte boundary
1649         __ andi_(R0, R3_ARG1, 7);
1650         __ beq(CCR0, l_7);
1651 
1652         __ addi(R3_ARG1, R3_ARG1, -4);
1653         __ addi(R4_ARG2, R4_ARG2, -4);
1654         __ addi(R5_ARG3, R5_ARG3, -1);
1655         __ lwzx(tmp2, R3_ARG1);
1656         __ stwx(tmp2, R4_ARG2);
1657         __ bind(l_7);
1658       }
1659 
1660       __ cmpwi(CCR0, R5_ARG3, 7);
1661       __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1662 
1663       __ srdi(tmp1, R5_ARG3, 3);
1664       __ andi(R5_ARG3, R5_ARG3, 7);
1665       __ mtctr(tmp1);
1666 
1667      if (!VM_Version::has_vsx()) {
1668       __ bind(l_4);
1669       // Use unrolled version for mass copying (copy 4 elements a time).
1670       // Load feeding store gets zero latency on Power6, however not on Power5.
1671       // Therefore, the following sequence is made for the good of both.
1672       __ addi(R3_ARG1, R3_ARG1, -32);
1673       __ addi(R4_ARG2, R4_ARG2, -32);
1674       __ ld(tmp4, 24, R3_ARG1);
1675       __ ld(tmp3, 16, R3_ARG1);
1676       __ ld(tmp2, 8, R3_ARG1);
1677       __ ld(tmp1, 0, R3_ARG1);
1678       __ std(tmp4, 24, R4_ARG2);
1679       __ std(tmp3, 16, R4_ARG2);
1680       __ std(tmp2, 8, R4_ARG2);
1681       __ std(tmp1, 0, R4_ARG2);
1682       __ bdnz(l_4);
1683      } else {  // Processor supports VSX, so use it to mass copy.
1684       // Prefetch the data into the L2 cache.
1685       __ dcbt(R3_ARG1, 0);
1686 
1687       // If supported set DSCR pre-fetch to deepest.
1688       if (VM_Version::has_mfdscr()) {
1689         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1690         __ mtdscr(tmp2);
1691       }
1692 
1693       __ li(tmp1, 16);
1694 
1695       // Backbranch target aligned to 32-byte. Not 16-byte align as
1696       // loop contains < 8 instructions that fit inside a single
1697       // i-cache sector.
1698       __ align(32);
1699 
1700       __ bind(l_4);
1701       // Use loop with VSX load/store instructions to
1702       // copy 8 elements a time.
1703       __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
1704       __ addi(R4_ARG2, R4_ARG2, -32);      // Update dsc-=32
1705       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
1706       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1707       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
1708       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1709       __ bdnz(l_4);
1710 
1711       // Restore DSCR pre-fetch value.
1712       if (VM_Version::has_mfdscr()) {
1713         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1714         __ mtdscr(tmp2);
1715       }
1716      }
1717 
1718       __ cmpwi(CCR0, R5_ARG3, 0);
1719       __ beq(CCR0, l_6);
1720 
1721       __ bind(l_5);
1722       __ mtctr(R5_ARG3);
1723       __ bind(l_3);
1724       __ lwz(R0, -4, R3_ARG1);
1725       __ stw(R0, -4, R4_ARG2);
1726       __ addi(R3_ARG1, R3_ARG1, -4);
1727       __ addi(R4_ARG2, R4_ARG2, -4);
1728       __ bdnz(l_3);
1729 
1730       __ bind(l_6);
1731     }
1732   }
1733 
1734   // Generate stub for conjoint int copy.  If "aligned" is true, the
1735   // "from" and "to" addresses are assumed to be heapword aligned.
1736   //
1737   // Arguments for generated stub:
1738   //      from:  R3_ARG1
1739   //      to:    R4_ARG2
1740   //      count: R5_ARG3 treated as signed
1741   //
1742   address generate_conjoint_int_copy(bool aligned, const char * name) {
1743     StubCodeMark mark(this, "StubRoutines", name);
1744     address start = __ function_entry();
1745     assert_positive_int(R5_ARG3);
1746     address nooverlap_target = aligned ?
1747       STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
1748       STUB_ENTRY(jint_disjoint_arraycopy);
1749 
1750     array_overlap_test(nooverlap_target, 2);
1751 
1752     generate_conjoint_int_copy_core(aligned);
1753 
1754     __ li(R3_RET, 0); // return 0
1755     __ blr();
1756 
1757     return start;
1758   }
1759 
1760   // Generate core code for disjoint long copy (and oop copy on
1761   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1762   // are assumed to be heapword aligned.
1763   //
1764   // Arguments:
1765   //      from:  R3_ARG1
1766   //      to:    R4_ARG2
1767   //      count: R5_ARG3 treated as signed
1768   //
1769   void generate_disjoint_long_copy_core(bool aligned) {
1770     Register tmp1 = R6_ARG4;
1771     Register tmp2 = R7_ARG5;
1772     Register tmp3 = R8_ARG6;
1773     Register tmp4 = R0;
1774 
1775     Label l_1, l_2, l_3, l_4, l_5;
1776 
1777     VectorSRegister tmp_vsr1  = VSR1;
1778     VectorSRegister tmp_vsr2  = VSR2;
1779 
1780     { // FasterArrayCopy
1781       __ cmpwi(CCR0, R5_ARG3, 3);
1782       __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1783 
1784       __ srdi(tmp1, R5_ARG3, 2);
1785       __ andi_(R5_ARG3, R5_ARG3, 3);
1786       __ mtctr(tmp1);
1787 
1788     if (!VM_Version::has_vsx()) {
1789       __ bind(l_4);
1790       // Use unrolled version for mass copying (copy 4 elements a time).
1791       // Load feeding store gets zero latency on Power6, however not on Power5.
1792       // Therefore, the following sequence is made for the good of both.
1793       __ ld(tmp1, 0, R3_ARG1);
1794       __ ld(tmp2, 8, R3_ARG1);
1795       __ ld(tmp3, 16, R3_ARG1);
1796       __ ld(tmp4, 24, R3_ARG1);
1797       __ std(tmp1, 0, R4_ARG2);
1798       __ std(tmp2, 8, R4_ARG2);
1799       __ std(tmp3, 16, R4_ARG2);
1800       __ std(tmp4, 24, R4_ARG2);
1801       __ addi(R3_ARG1, R3_ARG1, 32);
1802       __ addi(R4_ARG2, R4_ARG2, 32);
1803       __ bdnz(l_4);
1804 
1805     } else { // Processor supports VSX, so use it to mass copy.
1806 
1807       // Prefetch the data into the L2 cache.
1808       __ dcbt(R3_ARG1, 0);
1809 
1810       // If supported set DSCR pre-fetch to deepest.
1811       if (VM_Version::has_mfdscr()) {
1812         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1813         __ mtdscr(tmp2);
1814       }
1815 
1816       __ li(tmp1, 16);
1817 
1818       // Backbranch target aligned to 32-byte. Not 16-byte align as
1819       // loop contains < 8 instructions that fit inside a single
1820       // i-cache sector.
1821       __ align(32);
1822 
1823       __ bind(l_5);
1824       // Use loop with VSX load/store instructions to
1825       // copy 4 elements a time.
1826       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1827       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1828       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1829       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1830       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1831       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dsc+=32
1832       __ bdnz(l_5);                        // Dec CTR and loop if not zero.
1833 
1834       // Restore DSCR pre-fetch value.
1835       if (VM_Version::has_mfdscr()) {
1836         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1837         __ mtdscr(tmp2);
1838       }
1839 
1840     } // VSX
1841    } // FasterArrayCopy
1842 
1843     // copy 1 element at a time
1844     __ bind(l_3);
1845     __ cmpwi(CCR0, R5_ARG3, 0);
1846     __ beq(CCR0, l_1);
1847 
1848     { // FasterArrayCopy
1849       __ mtctr(R5_ARG3);
1850       __ addi(R3_ARG1, R3_ARG1, -8);
1851       __ addi(R4_ARG2, R4_ARG2, -8);
1852 
1853       __ bind(l_2);
1854       __ ldu(R0, 8, R3_ARG1);
1855       __ stdu(R0, 8, R4_ARG2);
1856       __ bdnz(l_2);
1857 
1858     }
1859     __ bind(l_1);
1860   }
1861 
1862   // Generate stub for disjoint long copy.  If "aligned" is true, the
1863   // "from" and "to" addresses are assumed to be heapword aligned.
1864   //
1865   // Arguments for generated stub:
1866   //      from:  R3_ARG1
1867   //      to:    R4_ARG2
1868   //      count: R5_ARG3 treated as signed
1869   //
1870   address generate_disjoint_long_copy(bool aligned, const char * name) {
1871     StubCodeMark mark(this, "StubRoutines", name);
1872     address start = __ function_entry();
1873     assert_positive_int(R5_ARG3);
1874     generate_disjoint_long_copy_core(aligned);
1875     __ li(R3_RET, 0); // return 0
1876     __ blr();
1877 
1878     return start;
1879   }
1880 
1881   // Generate core code for conjoint long copy (and oop copy on
1882   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1883   // are assumed to be heapword aligned.
1884   //
1885   // Arguments:
1886   //      from:  R3_ARG1
1887   //      to:    R4_ARG2
1888   //      count: R5_ARG3 treated as signed
1889   //
1890   void generate_conjoint_long_copy_core(bool aligned) {
1891     Register tmp1 = R6_ARG4;
1892     Register tmp2 = R7_ARG5;
1893     Register tmp3 = R8_ARG6;
1894     Register tmp4 = R0;
1895 
1896     VectorSRegister tmp_vsr1  = VSR1;
1897     VectorSRegister tmp_vsr2  = VSR2;
1898 
1899     Label l_1, l_2, l_3, l_4, l_5;
1900 
1901     __ cmpwi(CCR0, R5_ARG3, 0);
1902     __ beq(CCR0, l_1);
1903 
1904     { // FasterArrayCopy
1905       __ sldi(R5_ARG3, R5_ARG3, 3);
1906       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1907       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1908       __ srdi(R5_ARG3, R5_ARG3, 3);
1909 
1910       __ cmpwi(CCR0, R5_ARG3, 3);
1911       __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
1912 
1913       __ srdi(tmp1, R5_ARG3, 2);
1914       __ andi(R5_ARG3, R5_ARG3, 3);
1915       __ mtctr(tmp1);
1916 
1917      if (!VM_Version::has_vsx()) {
1918       __ bind(l_4);
1919       // Use unrolled version for mass copying (copy 4 elements a time).
1920       // Load feeding store gets zero latency on Power6, however not on Power5.
1921       // Therefore, the following sequence is made for the good of both.
1922       __ addi(R3_ARG1, R3_ARG1, -32);
1923       __ addi(R4_ARG2, R4_ARG2, -32);
1924       __ ld(tmp4, 24, R3_ARG1);
1925       __ ld(tmp3, 16, R3_ARG1);
1926       __ ld(tmp2, 8, R3_ARG1);
1927       __ ld(tmp1, 0, R3_ARG1);
1928       __ std(tmp4, 24, R4_ARG2);
1929       __ std(tmp3, 16, R4_ARG2);
1930       __ std(tmp2, 8, R4_ARG2);
1931       __ std(tmp1, 0, R4_ARG2);
1932       __ bdnz(l_4);
1933      } else { // Processor supports VSX, so use it to mass copy.
1934       // Prefetch the data into the L2 cache.
1935       __ dcbt(R3_ARG1, 0);
1936 
1937       // If supported set DSCR pre-fetch to deepest.
1938       if (VM_Version::has_mfdscr()) {
1939         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1940         __ mtdscr(tmp2);
1941       }
1942 
1943       __ li(tmp1, 16);
1944 
1945       // Backbranch target aligned to 32-byte. Not 16-byte align as
1946       // loop contains < 8 instructions that fit inside a single
1947       // i-cache sector.
1948       __ align(32);
1949 
1950       __ bind(l_4);
1951       // Use loop with VSX load/store instructions to
1952       // copy 4 elements a time.
1953       __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
1954       __ addi(R4_ARG2, R4_ARG2, -32);      // Update dsc-=32
1955       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
1956       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1957       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
1958       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1959       __ bdnz(l_4);
1960 
1961       // Restore DSCR pre-fetch value.
1962       if (VM_Version::has_mfdscr()) {
1963         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1964         __ mtdscr(tmp2);
1965       }
1966      }
1967 
1968       __ cmpwi(CCR0, R5_ARG3, 0);
1969       __ beq(CCR0, l_1);
1970 
1971       __ bind(l_5);
1972       __ mtctr(R5_ARG3);
1973       __ bind(l_3);
1974       __ ld(R0, -8, R3_ARG1);
1975       __ std(R0, -8, R4_ARG2);
1976       __ addi(R3_ARG1, R3_ARG1, -8);
1977       __ addi(R4_ARG2, R4_ARG2, -8);
1978       __ bdnz(l_3);
1979 
1980     }
1981     __ bind(l_1);
1982   }
1983 
1984   // Generate stub for conjoint long copy.  If "aligned" is true, the
1985   // "from" and "to" addresses are assumed to be heapword aligned.
1986   //
1987   // Arguments for generated stub:
1988   //      from:  R3_ARG1
1989   //      to:    R4_ARG2
1990   //      count: R5_ARG3 treated as signed
1991   //
1992   address generate_conjoint_long_copy(bool aligned, const char * name) {
1993     StubCodeMark mark(this, "StubRoutines", name);
1994     address start = __ function_entry();
1995     assert_positive_int(R5_ARG3);
1996     address nooverlap_target = aligned ?
1997       STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) :
1998       STUB_ENTRY(jlong_disjoint_arraycopy);
1999 
2000     array_overlap_test(nooverlap_target, 3);
2001     generate_conjoint_long_copy_core(aligned);
2002 
2003     __ li(R3_RET, 0); // return 0
2004     __ blr();
2005 
2006     return start;
2007   }
2008 
2009   // Generate stub for conjoint oop copy.  If "aligned" is true, the
2010   // "from" and "to" addresses are assumed to be heapword aligned.
2011   //
2012   // Arguments for generated stub:
2013   //      from:  R3_ARG1
2014   //      to:    R4_ARG2
2015   //      count: R5_ARG3 treated as signed
2016   //      dest_uninitialized: G1 support
2017   //
2018   address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
2019     StubCodeMark mark(this, "StubRoutines", name);
2020 
2021     address start = __ function_entry();
2022     assert_positive_int(R5_ARG3);
2023     address nooverlap_target = aligned ?
2024       STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
2025       STUB_ENTRY(oop_disjoint_arraycopy);
2026 
2027     BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen();
2028     DecoratorSet decorators = 0;
2029     if (dest_uninitialized) {
2030       decorators |= AS_DEST_NOT_INITIALIZED;
2031     }
2032     if (aligned) {
2033       decorators |= ARRAYCOPY_ALIGNED;
2034     }
2035     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
2036 
2037     if (UseCompressedOops) {
2038       array_overlap_test(nooverlap_target, 2);
2039       generate_conjoint_int_copy_core(aligned);
2040     } else {
2041       array_overlap_test(nooverlap_target, 3);
2042       generate_conjoint_long_copy_core(aligned);
2043     }
2044 
2045     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
2046     __ li(R3_RET, 0); // return 0
2047     __ blr();
2048     return start;
2049   }
2050 
2051   // Generate stub for disjoint oop copy.  If "aligned" is true, the
2052   // "from" and "to" addresses are assumed to be heapword aligned.
2053   //
2054   // Arguments for generated stub:
2055   //      from:  R3_ARG1
2056   //      to:    R4_ARG2
2057   //      count: R5_ARG3 treated as signed
2058   //      dest_uninitialized: G1 support
2059   //
2060   address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
2061     StubCodeMark mark(this, "StubRoutines", name);
2062     address start = __ function_entry();
2063     assert_positive_int(R5_ARG3);
2064 
2065     BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen();
2066     DecoratorSet decorators = ARRAYCOPY_DISJOINT;
2067     if (dest_uninitialized) {
2068       decorators |= AS_DEST_NOT_INITIALIZED;
2069     }
2070     if (aligned) {
2071       decorators |= ARRAYCOPY_ALIGNED;
2072     }
2073     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
2074 
2075     if (UseCompressedOops) {
2076       generate_disjoint_int_copy_core(aligned);
2077     } else {
2078       generate_disjoint_long_copy_core(aligned);
2079     }
2080 
2081     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
2082     __ li(R3_RET, 0); // return 0
2083     __ blr();
2084 
2085     return start;
2086   }
2087 
2088 
2089   // Helper for generating a dynamic type check.
2090   // Smashes only the given temp registers.
2091   void generate_type_check(Register sub_klass,
2092                            Register super_check_offset,
2093                            Register super_klass,
2094                            Register temp,
2095                            Label& L_success) {
2096     assert_different_registers(sub_klass, super_check_offset, super_klass);
2097 
2098     BLOCK_COMMENT("type_check:");
2099 
2100     Label L_miss;
2101 
2102     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL,
2103                                      super_check_offset);
2104     __ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL);
2105 
2106     // Fall through on failure!
2107     __ bind(L_miss);
2108   }
2109 
2110 
2111   //  Generate stub for checked oop copy.
2112   //
2113   // Arguments for generated stub:
2114   //      from:  R3
2115   //      to:    R4
2116   //      count: R5 treated as signed
2117   //      ckoff: R6 (super_check_offset)
2118   //      ckval: R7 (super_klass)
2119   //      ret:   R3 zero for success; (-1^K) where K is partial transfer count
2120   //
2121   address generate_checkcast_copy(const char *name, bool dest_uninitialized) {
2122 
2123     const Register R3_from   = R3_ARG1;      // source array address
2124     const Register R4_to     = R4_ARG2;      // destination array address
2125     const Register R5_count  = R5_ARG3;      // elements count
2126     const Register R6_ckoff  = R6_ARG4;      // super_check_offset
2127     const Register R7_ckval  = R7_ARG5;      // super_klass
2128 
2129     const Register R8_offset = R8_ARG6;      // loop var, with stride wordSize
2130     const Register R9_remain = R9_ARG7;      // loop var, with stride -1
2131     const Register R10_oop   = R10_ARG8;     // actual oop copied
2132     const Register R11_klass = R11_scratch1; // oop._klass
2133     const Register R12_tmp   = R12_scratch2;
2134 
2135     const Register R2_minus1 = R2;
2136 
2137     //__ align(CodeEntryAlignment);
2138     StubCodeMark mark(this, "StubRoutines", name);
2139     address start = __ function_entry();
2140 
2141     // Assert that int is 64 bit sign extended and arrays are not conjoint.
2142 #ifdef ASSERT
2143     {
2144     assert_positive_int(R5_ARG3);
2145     const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2;
2146     Label no_overlap;
2147     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
2148     __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes
2149     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
2150     __ cmpld(CCR1, tmp1, tmp2);
2151     __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
2152     // Overlaps if Src before dst and distance smaller than size.
2153     // Branch to forward copy routine otherwise.
2154     __ blt(CCR0, no_overlap);
2155     __ stop("overlap in checkcast_copy", 0x9543);
2156     __ bind(no_overlap);
2157     }
2158 #endif
2159 
2160     BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen();
2161     DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
2162     if (dest_uninitialized) {
2163       decorators |= AS_DEST_NOT_INITIALIZED;
2164     }
2165     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_from, R4_to, R5_count, /* preserve: */ R6_ckoff, R7_ckval);
2166 
2167     //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
2168 
2169     Label load_element, store_element, store_null, success, do_epilogue;
2170     __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
2171     __ li(R8_offset, 0);                   // Offset from start of arrays.
2172     __ li(R2_minus1, -1);
2173     __ bne(CCR0, load_element);
2174 
2175     // Empty array: Nothing to do.
2176     __ li(R3_RET, 0);           // Return 0 on (trivial) success.
2177     __ blr();
2178 
2179     // ======== begin loop ========
2180     // (Entry is load_element.)
2181     __ align(OptoLoopAlignment);
2182     __ bind(store_element);
2183     if (UseCompressedOops) {
2184       __ encode_heap_oop_not_null(R10_oop);
2185       __ bind(store_null);
2186       __ stw(R10_oop, R8_offset, R4_to);
2187     } else {
2188       __ bind(store_null);
2189       __ std(R10_oop, R8_offset, R4_to);
2190     }
2191 
2192     __ addi(R8_offset, R8_offset, heapOopSize);   // Step to next offset.
2193     __ add_(R9_remain, R2_minus1, R9_remain);     // Decrement the count.
2194     __ beq(CCR0, success);
2195 
2196     // ======== loop entry is here ========
2197     __ bind(load_element);
2198     __ load_heap_oop(R10_oop, R8_offset, R3_from, &store_null);  // Load the oop.
2199 
2200     __ load_klass(R11_klass, R10_oop); // Query the object klass.
2201 
2202     generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
2203                         // Branch to this on success:
2204                         store_element);
2205     // ======== end loop ========
2206 
2207     // It was a real error; we must depend on the caller to finish the job.
2208     // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
2209     // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
2210     // and report their number to the caller.
2211     __ subf_(R5_count, R9_remain, R5_count);
2212     __ nand(R3_RET, R5_count, R5_count);   // report (-1^K) to caller
2213     __ bne(CCR0, do_epilogue);
2214     __ blr();
2215 
2216     __ bind(success);
2217     __ li(R3_RET, 0);
2218 
2219     __ bind(do_epilogue);
2220     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_to, R5_count, /* preserve */ R3_RET);
2221 
2222     __ blr();
2223     return start;
2224   }
2225 
2226 
2227   //  Generate 'unsafe' array copy stub.
2228   //  Though just as safe as the other stubs, it takes an unscaled
2229   //  size_t argument instead of an element count.
2230   //
2231   // Arguments for generated stub:
2232   //      from:  R3
2233   //      to:    R4
2234   //      count: R5 byte count, treated as ssize_t, can be zero
2235   //
2236   // Examines the alignment of the operands and dispatches
2237   // to a long, int, short, or byte copy loop.
2238   //
2239   address generate_unsafe_copy(const char* name,
2240                                address byte_copy_entry,
2241                                address short_copy_entry,
2242                                address int_copy_entry,
2243                                address long_copy_entry) {
2244 
2245     const Register R3_from   = R3_ARG1;      // source array address
2246     const Register R4_to     = R4_ARG2;      // destination array address
2247     const Register R5_count  = R5_ARG3;      // elements count (as long on PPC64)
2248 
2249     const Register R6_bits   = R6_ARG4;      // test copy of low bits
2250     const Register R7_tmp    = R7_ARG5;
2251 
2252     //__ align(CodeEntryAlignment);
2253     StubCodeMark mark(this, "StubRoutines", name);
2254     address start = __ function_entry();
2255 
2256     // Bump this on entry, not on exit:
2257     //inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp);
2258 
2259     Label short_copy, int_copy, long_copy;
2260 
2261     __ orr(R6_bits, R3_from, R4_to);
2262     __ orr(R6_bits, R6_bits, R5_count);
2263     __ andi_(R0, R6_bits, (BytesPerLong-1));
2264     __ beq(CCR0, long_copy);
2265 
2266     __ andi_(R0, R6_bits, (BytesPerInt-1));
2267     __ beq(CCR0, int_copy);
2268 
2269     __ andi_(R0, R6_bits, (BytesPerShort-1));
2270     __ beq(CCR0, short_copy);
2271 
2272     // byte_copy:
2273     __ b(byte_copy_entry);
2274 
2275     __ bind(short_copy);
2276     __ srwi(R5_count, R5_count, LogBytesPerShort);
2277     __ b(short_copy_entry);
2278 
2279     __ bind(int_copy);
2280     __ srwi(R5_count, R5_count, LogBytesPerInt);
2281     __ b(int_copy_entry);
2282 
2283     __ bind(long_copy);
2284     __ srwi(R5_count, R5_count, LogBytesPerLong);
2285     __ b(long_copy_entry);
2286 
2287     return start;
2288   }
2289 
2290 
2291   // Perform range checks on the proposed arraycopy.
2292   // Kills the two temps, but nothing else.
2293   // Also, clean the sign bits of src_pos and dst_pos.
2294   void arraycopy_range_checks(Register src,     // source array oop
2295                               Register src_pos, // source position
2296                               Register dst,     // destination array oop
2297                               Register dst_pos, // destination position
2298                               Register length,  // length of copy
2299                               Register temp1, Register temp2,
2300                               Label& L_failed) {
2301     BLOCK_COMMENT("arraycopy_range_checks:");
2302 
2303     const Register array_length = temp1;  // scratch
2304     const Register end_pos      = temp2;  // scratch
2305 
2306     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2307     __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src);
2308     __ add(end_pos, src_pos, length);  // src_pos + length
2309     __ cmpd(CCR0, end_pos, array_length);
2310     __ bgt(CCR0, L_failed);
2311 
2312     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2313     __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst);
2314     __ add(end_pos, dst_pos, length);  // src_pos + length
2315     __ cmpd(CCR0, end_pos, array_length);
2316     __ bgt(CCR0, L_failed);
2317 
2318     BLOCK_COMMENT("arraycopy_range_checks done");
2319   }
2320 
2321 
2322   //
2323   //  Generate generic array copy stubs
2324   //
2325   //  Input:
2326   //    R3    -  src oop
2327   //    R4    -  src_pos
2328   //    R5    -  dst oop
2329   //    R6    -  dst_pos
2330   //    R7    -  element count
2331   //
2332   //  Output:
2333   //    R3 ==  0  -  success
2334   //    R3 == -1  -  need to call System.arraycopy
2335   //
2336   address generate_generic_copy(const char *name,
2337                                 address entry_jbyte_arraycopy,
2338                                 address entry_jshort_arraycopy,
2339                                 address entry_jint_arraycopy,
2340                                 address entry_oop_arraycopy,
2341                                 address entry_disjoint_oop_arraycopy,
2342                                 address entry_jlong_arraycopy,
2343                                 address entry_checkcast_arraycopy) {
2344     Label L_failed, L_objArray;
2345 
2346     // Input registers
2347     const Register src       = R3_ARG1;  // source array oop
2348     const Register src_pos   = R4_ARG2;  // source position
2349     const Register dst       = R5_ARG3;  // destination array oop
2350     const Register dst_pos   = R6_ARG4;  // destination position
2351     const Register length    = R7_ARG5;  // elements count
2352 
2353     // registers used as temp
2354     const Register src_klass = R8_ARG6;  // source array klass
2355     const Register dst_klass = R9_ARG7;  // destination array klass
2356     const Register lh        = R10_ARG8; // layout handler
2357     const Register temp      = R2;
2358 
2359     //__ align(CodeEntryAlignment);
2360     StubCodeMark mark(this, "StubRoutines", name);
2361     address start = __ function_entry();
2362 
2363     // Bump this on entry, not on exit:
2364     //inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp);
2365 
2366     // In principle, the int arguments could be dirty.
2367 
2368     //-----------------------------------------------------------------------
2369     // Assembler stubs will be used for this call to arraycopy
2370     // if the following conditions are met:
2371     //
2372     // (1) src and dst must not be null.
2373     // (2) src_pos must not be negative.
2374     // (3) dst_pos must not be negative.
2375     // (4) length  must not be negative.
2376     // (5) src klass and dst klass should be the same and not NULL.
2377     // (6) src and dst should be arrays.
2378     // (7) src_pos + length must not exceed length of src.
2379     // (8) dst_pos + length must not exceed length of dst.
2380     BLOCK_COMMENT("arraycopy initial argument checks");
2381 
2382     __ cmpdi(CCR1, src, 0);      // if (src == NULL) return -1;
2383     __ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1;
2384     __ cmpdi(CCR5, dst, 0);      // if (dst == NULL) return -1;
2385     __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2386     __ extsw_(dst_pos, dst_pos); // if (src_pos < 0) return -1;
2387     __ cror(CCR5, Assembler::equal, CCR0, Assembler::less);
2388     __ extsw_(length, length);   // if (length < 0) return -1;
2389     __ cror(CCR1, Assembler::equal, CCR5, Assembler::equal);
2390     __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2391     __ beq(CCR1, L_failed);
2392 
2393     BLOCK_COMMENT("arraycopy argument klass checks");
2394     __ load_klass(src_klass, src);
2395     __ load_klass(dst_klass, dst);
2396 
2397     // Load layout helper
2398     //
2399     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2400     // 32        30    24            16              8     2                 0
2401     //
2402     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2403     //
2404 
2405     int lh_offset = in_bytes(Klass::layout_helper_offset());
2406 
2407     // Load 32-bits signed value. Use br() instruction with it to check icc.
2408     __ lwz(lh, lh_offset, src_klass);
2409 
2410     // Handle objArrays completely differently...
2411     jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2412     __ load_const_optimized(temp, objArray_lh, R0);
2413     __ cmpw(CCR0, lh, temp);
2414     __ beq(CCR0, L_objArray);
2415 
2416     __ cmpd(CCR5, src_klass, dst_klass);          // if (src->klass() != dst->klass()) return -1;
2417     __ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1;
2418 
2419     __ crnand(CCR5, Assembler::equal, CCR6, Assembler::less);
2420     __ beq(CCR5, L_failed);
2421 
2422     // At this point, it is known to be a typeArray (array_tag 0x3).
2423 #ifdef ASSERT
2424     { Label L;
2425       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2426       __ load_const_optimized(temp, lh_prim_tag_in_place, R0);
2427       __ cmpw(CCR0, lh, temp);
2428       __ bge(CCR0, L);
2429       __ stop("must be a primitive array");
2430       __ bind(L);
2431     }
2432 #endif
2433 
2434     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2435                            temp, dst_klass, L_failed);
2436 
2437     // TypeArrayKlass
2438     //
2439     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2440     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2441     //
2442 
2443     const Register offset = dst_klass;    // array offset
2444     const Register elsize = src_klass;    // log2 element size
2445 
2446     __ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1));
2447     __ andi(elsize, lh, Klass::_lh_log2_element_size_mask);
2448     __ add(src, offset, src);       // src array offset
2449     __ add(dst, offset, dst);       // dst array offset
2450 
2451     // Next registers should be set before the jump to corresponding stub.
2452     const Register from     = R3_ARG1;  // source array address
2453     const Register to       = R4_ARG2;  // destination array address
2454     const Register count    = R5_ARG3;  // elements count
2455 
2456     // 'from', 'to', 'count' registers should be set in this order
2457     // since they are the same as 'src', 'src_pos', 'dst'.
2458 
2459     BLOCK_COMMENT("scale indexes to element size");
2460     __ sld(src_pos, src_pos, elsize);
2461     __ sld(dst_pos, dst_pos, elsize);
2462     __ add(from, src_pos, src);  // src_addr
2463     __ add(to, dst_pos, dst);    // dst_addr
2464     __ mr(count, length);        // length
2465 
2466     BLOCK_COMMENT("choose copy loop based on element size");
2467     // Using conditional branches with range 32kB.
2468     const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal);
2469     __ cmpwi(CCR0, elsize, 0);
2470     __ bc(bo, bi, entry_jbyte_arraycopy);
2471     __ cmpwi(CCR0, elsize, LogBytesPerShort);
2472     __ bc(bo, bi, entry_jshort_arraycopy);
2473     __ cmpwi(CCR0, elsize, LogBytesPerInt);
2474     __ bc(bo, bi, entry_jint_arraycopy);
2475 #ifdef ASSERT
2476     { Label L;
2477       __ cmpwi(CCR0, elsize, LogBytesPerLong);
2478       __ beq(CCR0, L);
2479       __ stop("must be long copy, but elsize is wrong");
2480       __ bind(L);
2481     }
2482 #endif
2483     __ b(entry_jlong_arraycopy);
2484 
2485     // ObjArrayKlass
2486   __ bind(L_objArray);
2487     // live at this point:  src_klass, dst_klass, src[_pos], dst[_pos], length
2488 
2489     Label L_disjoint_plain_copy, L_checkcast_copy;
2490     //  test array classes for subtyping
2491     __ cmpd(CCR0, src_klass, dst_klass);         // usual case is exact equality
2492     __ bne(CCR0, L_checkcast_copy);
2493 
2494     // Identically typed arrays can be copied without element-wise checks.
2495     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2496                            temp, lh, L_failed);
2497 
2498     __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2499     __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2500     __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2501     __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2502     __ add(from, src_pos, src);  // src_addr
2503     __ add(to, dst_pos, dst);    // dst_addr
2504     __ mr(count, length);        // length
2505     __ b(entry_oop_arraycopy);
2506 
2507   __ bind(L_checkcast_copy);
2508     // live at this point:  src_klass, dst_klass
2509     {
2510       // Before looking at dst.length, make sure dst is also an objArray.
2511       __ lwz(temp, lh_offset, dst_klass);
2512       __ cmpw(CCR0, lh, temp);
2513       __ bne(CCR0, L_failed);
2514 
2515       // It is safe to examine both src.length and dst.length.
2516       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2517                              temp, lh, L_failed);
2518 
2519       // Marshal the base address arguments now, freeing registers.
2520       __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2521       __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2522       __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2523       __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2524       __ add(from, src_pos, src);  // src_addr
2525       __ add(to, dst_pos, dst);    // dst_addr
2526       __ mr(count, length);        // length
2527 
2528       Register sco_temp = R6_ARG4;             // This register is free now.
2529       assert_different_registers(from, to, count, sco_temp,
2530                                  dst_klass, src_klass);
2531 
2532       // Generate the type check.
2533       int sco_offset = in_bytes(Klass::super_check_offset_offset());
2534       __ lwz(sco_temp, sco_offset, dst_klass);
2535       generate_type_check(src_klass, sco_temp, dst_klass,
2536                           temp, L_disjoint_plain_copy);
2537 
2538       // Fetch destination element klass from the ObjArrayKlass header.
2539       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2540 
2541       // The checkcast_copy loop needs two extra arguments:
2542       __ ld(R7_ARG5, ek_offset, dst_klass);   // dest elem klass
2543       __ lwz(R6_ARG4, sco_offset, R7_ARG5);   // sco of elem klass
2544       __ b(entry_checkcast_arraycopy);
2545     }
2546 
2547     __ bind(L_disjoint_plain_copy);
2548     __ b(entry_disjoint_oop_arraycopy);
2549 
2550   __ bind(L_failed);
2551     __ li(R3_RET, -1); // return -1
2552     __ blr();
2553     return start;
2554   }
2555 
2556   // Arguments for generated stub:
2557   //   R3_ARG1   - source byte array address
2558   //   R4_ARG2   - destination byte array address
2559   //   R5_ARG3   - round key array
2560   address generate_aescrypt_encryptBlock() {
2561     assert(UseAES, "need AES instructions and misaligned SSE support");
2562     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
2563 
2564     address start = __ function_entry();
2565 
2566     Label L_doLast;
2567 
2568     Register from           = R3_ARG1;  // source array address
2569     Register to             = R4_ARG2;  // destination array address
2570     Register key            = R5_ARG3;  // round key array
2571 
2572     Register keylen         = R8;
2573     Register temp           = R9;
2574     Register keypos         = R10;
2575     Register fifteen        = R12;
2576 
2577     VectorRegister vRet     = VR0;
2578 
2579     VectorRegister vKey1    = VR1;
2580     VectorRegister vKey2    = VR2;
2581     VectorRegister vKey3    = VR3;
2582     VectorRegister vKey4    = VR4;
2583 
2584     VectorRegister fromPerm = VR5;
2585     VectorRegister keyPerm  = VR6;
2586     VectorRegister toPerm   = VR7;
2587     VectorRegister fSplt    = VR8;
2588 
2589     VectorRegister vTmp1    = VR9;
2590     VectorRegister vTmp2    = VR10;
2591     VectorRegister vTmp3    = VR11;
2592     VectorRegister vTmp4    = VR12;
2593 
2594     __ li              (fifteen, 15);
2595 
2596     // load unaligned from[0-15] to vsRet
2597     __ lvx             (vRet, from);
2598     __ lvx             (vTmp1, fifteen, from);
2599     __ lvsl            (fromPerm, from);
2600 #ifdef VM_LITTLE_ENDIAN
2601     __ vspltisb        (fSplt, 0x0f);
2602     __ vxor            (fromPerm, fromPerm, fSplt);
2603 #endif
2604     __ vperm           (vRet, vRet, vTmp1, fromPerm);
2605 
2606     // load keylen (44 or 52 or 60)
2607     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2608 
2609     // to load keys
2610     __ load_perm       (keyPerm, key);
2611 #ifdef VM_LITTLE_ENDIAN
2612     __ vspltisb        (vTmp2, -16);
2613     __ vrld            (keyPerm, keyPerm, vTmp2);
2614     __ vrld            (keyPerm, keyPerm, vTmp2);
2615     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
2616 #endif
2617 
2618     // load the 1st round key to vTmp1
2619     __ lvx             (vTmp1, key);
2620     __ li              (keypos, 16);
2621     __ lvx             (vKey1, keypos, key);
2622     __ vec_perm        (vTmp1, vKey1, keyPerm);
2623 
2624     // 1st round
2625     __ vxor            (vRet, vRet, vTmp1);
2626 
2627     // load the 2nd round key to vKey1
2628     __ li              (keypos, 32);
2629     __ lvx             (vKey2, keypos, key);
2630     __ vec_perm        (vKey1, vKey2, keyPerm);
2631 
2632     // load the 3rd round key to vKey2
2633     __ li              (keypos, 48);
2634     __ lvx             (vKey3, keypos, key);
2635     __ vec_perm        (vKey2, vKey3, keyPerm);
2636 
2637     // load the 4th round key to vKey3
2638     __ li              (keypos, 64);
2639     __ lvx             (vKey4, keypos, key);
2640     __ vec_perm        (vKey3, vKey4, keyPerm);
2641 
2642     // load the 5th round key to vKey4
2643     __ li              (keypos, 80);
2644     __ lvx             (vTmp1, keypos, key);
2645     __ vec_perm        (vKey4, vTmp1, keyPerm);
2646 
2647     // 2nd - 5th rounds
2648     __ vcipher         (vRet, vRet, vKey1);
2649     __ vcipher         (vRet, vRet, vKey2);
2650     __ vcipher         (vRet, vRet, vKey3);
2651     __ vcipher         (vRet, vRet, vKey4);
2652 
2653     // load the 6th round key to vKey1
2654     __ li              (keypos, 96);
2655     __ lvx             (vKey2, keypos, key);
2656     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2657 
2658     // load the 7th round key to vKey2
2659     __ li              (keypos, 112);
2660     __ lvx             (vKey3, keypos, key);
2661     __ vec_perm        (vKey2, vKey3, keyPerm);
2662 
2663     // load the 8th round key to vKey3
2664     __ li              (keypos, 128);
2665     __ lvx             (vKey4, keypos, key);
2666     __ vec_perm        (vKey3, vKey4, keyPerm);
2667 
2668     // load the 9th round key to vKey4
2669     __ li              (keypos, 144);
2670     __ lvx             (vTmp1, keypos, key);
2671     __ vec_perm        (vKey4, vTmp1, keyPerm);
2672 
2673     // 6th - 9th rounds
2674     __ vcipher         (vRet, vRet, vKey1);
2675     __ vcipher         (vRet, vRet, vKey2);
2676     __ vcipher         (vRet, vRet, vKey3);
2677     __ vcipher         (vRet, vRet, vKey4);
2678 
2679     // load the 10th round key to vKey1
2680     __ li              (keypos, 160);
2681     __ lvx             (vKey2, keypos, key);
2682     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2683 
2684     // load the 11th round key to vKey2
2685     __ li              (keypos, 176);
2686     __ lvx             (vTmp1, keypos, key);
2687     __ vec_perm        (vKey2, vTmp1, keyPerm);
2688 
2689     // if all round keys are loaded, skip next 4 rounds
2690     __ cmpwi           (CCR0, keylen, 44);
2691     __ beq             (CCR0, L_doLast);
2692 
2693     // 10th - 11th rounds
2694     __ vcipher         (vRet, vRet, vKey1);
2695     __ vcipher         (vRet, vRet, vKey2);
2696 
2697     // load the 12th round key to vKey1
2698     __ li              (keypos, 192);
2699     __ lvx             (vKey2, keypos, key);
2700     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2701 
2702     // load the 13th round key to vKey2
2703     __ li              (keypos, 208);
2704     __ lvx             (vTmp1, keypos, key);
2705     __ vec_perm        (vKey2, vTmp1, keyPerm);
2706 
2707     // if all round keys are loaded, skip next 2 rounds
2708     __ cmpwi           (CCR0, keylen, 52);
2709     __ beq             (CCR0, L_doLast);
2710 
2711     // 12th - 13th rounds
2712     __ vcipher         (vRet, vRet, vKey1);
2713     __ vcipher         (vRet, vRet, vKey2);
2714 
2715     // load the 14th round key to vKey1
2716     __ li              (keypos, 224);
2717     __ lvx             (vKey2, keypos, key);
2718     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2719 
2720     // load the 15th round key to vKey2
2721     __ li              (keypos, 240);
2722     __ lvx             (vTmp1, keypos, key);
2723     __ vec_perm        (vKey2, vTmp1, keyPerm);
2724 
2725     __ bind(L_doLast);
2726 
2727     // last two rounds
2728     __ vcipher         (vRet, vRet, vKey1);
2729     __ vcipherlast     (vRet, vRet, vKey2);
2730 
2731     // store result (unaligned)
2732 #ifdef VM_LITTLE_ENDIAN
2733     __ lvsl            (toPerm, to);
2734 #else
2735     __ lvsr            (toPerm, to);
2736 #endif
2737     __ vspltisb        (vTmp3, -1);
2738     __ vspltisb        (vTmp4, 0);
2739     __ lvx             (vTmp1, to);
2740     __ lvx             (vTmp2, fifteen, to);
2741 #ifdef VM_LITTLE_ENDIAN
2742     __ vperm           (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
2743     __ vxor            (toPerm, toPerm, fSplt);       // swap bytes
2744 #else
2745     __ vperm           (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
2746 #endif
2747     __ vperm           (vTmp4, vRet, vRet, toPerm);   // rotate data
2748     __ vsel            (vTmp2, vTmp4, vTmp2, vTmp3);
2749     __ vsel            (vTmp1, vTmp1, vTmp4, vTmp3);
2750     __ stvx            (vTmp2, fifteen, to);          // store this one first (may alias)
2751     __ stvx            (vTmp1, to);
2752 
2753     __ blr();
2754      return start;
2755   }
2756 
2757   // Arguments for generated stub:
2758   //   R3_ARG1   - source byte array address
2759   //   R4_ARG2   - destination byte array address
2760   //   R5_ARG3   - K (key) in little endian int array
2761   address generate_aescrypt_decryptBlock() {
2762     assert(UseAES, "need AES instructions and misaligned SSE support");
2763     StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
2764 
2765     address start = __ function_entry();
2766 
2767     Label L_doLast;
2768     Label L_do44;
2769     Label L_do52;
2770     Label L_do60;
2771 
2772     Register from           = R3_ARG1;  // source array address
2773     Register to             = R4_ARG2;  // destination array address
2774     Register key            = R5_ARG3;  // round key array
2775 
2776     Register keylen         = R8;
2777     Register temp           = R9;
2778     Register keypos         = R10;
2779     Register fifteen        = R12;
2780 
2781     VectorRegister vRet     = VR0;
2782 
2783     VectorRegister vKey1    = VR1;
2784     VectorRegister vKey2    = VR2;
2785     VectorRegister vKey3    = VR3;
2786     VectorRegister vKey4    = VR4;
2787     VectorRegister vKey5    = VR5;
2788 
2789     VectorRegister fromPerm = VR6;
2790     VectorRegister keyPerm  = VR7;
2791     VectorRegister toPerm   = VR8;
2792     VectorRegister fSplt    = VR9;
2793 
2794     VectorRegister vTmp1    = VR10;
2795     VectorRegister vTmp2    = VR11;
2796     VectorRegister vTmp3    = VR12;
2797     VectorRegister vTmp4    = VR13;
2798 
2799     __ li              (fifteen, 15);
2800 
2801     // load unaligned from[0-15] to vsRet
2802     __ lvx             (vRet, from);
2803     __ lvx             (vTmp1, fifteen, from);
2804     __ lvsl            (fromPerm, from);
2805 #ifdef VM_LITTLE_ENDIAN
2806     __ vspltisb        (fSplt, 0x0f);
2807     __ vxor            (fromPerm, fromPerm, fSplt);
2808 #endif
2809     __ vperm           (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]
2810 
2811     // load keylen (44 or 52 or 60)
2812     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2813 
2814     // to load keys
2815     __ load_perm       (keyPerm, key);
2816 #ifdef VM_LITTLE_ENDIAN
2817     __ vxor            (vTmp2, vTmp2, vTmp2);
2818     __ vspltisb        (vTmp2, -16);
2819     __ vrld            (keyPerm, keyPerm, vTmp2);
2820     __ vrld            (keyPerm, keyPerm, vTmp2);
2821     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
2822 #endif
2823 
2824     __ cmpwi           (CCR0, keylen, 44);
2825     __ beq             (CCR0, L_do44);
2826 
2827     __ cmpwi           (CCR0, keylen, 52);
2828     __ beq             (CCR0, L_do52);
2829 
2830     // load the 15th round key to vKey1
2831     __ li              (keypos, 240);
2832     __ lvx             (vKey1, keypos, key);
2833     __ li              (keypos, 224);
2834     __ lvx             (vKey2, keypos, key);
2835     __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
2836 
2837     // load the 14th round key to vKey2
2838     __ li              (keypos, 208);
2839     __ lvx             (vKey3, keypos, key);
2840     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2841 
2842     // load the 13th round key to vKey3
2843     __ li              (keypos, 192);
2844     __ lvx             (vKey4, keypos, key);
2845     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
2846 
2847     // load the 12th round key to vKey4
2848     __ li              (keypos, 176);
2849     __ lvx             (vKey5, keypos, key);
2850     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
2851 
2852     // load the 11th round key to vKey5
2853     __ li              (keypos, 160);
2854     __ lvx             (vTmp1, keypos, key);
2855     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
2856 
2857     // 1st - 5th rounds
2858     __ vxor            (vRet, vRet, vKey1);
2859     __ vncipher        (vRet, vRet, vKey2);
2860     __ vncipher        (vRet, vRet, vKey3);
2861     __ vncipher        (vRet, vRet, vKey4);
2862     __ vncipher        (vRet, vRet, vKey5);
2863 
2864     __ b               (L_doLast);
2865 
2866     __ bind            (L_do52);
2867 
2868     // load the 13th round key to vKey1
2869     __ li              (keypos, 208);
2870     __ lvx             (vKey1, keypos, key);
2871     __ li              (keypos, 192);
2872     __ lvx             (vKey2, keypos, key);
2873     __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
2874 
2875     // load the 12th round key to vKey2
2876     __ li              (keypos, 176);
2877     __ lvx             (vKey3, keypos, key);
2878     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2879 
2880     // load the 11th round key to vKey3
2881     __ li              (keypos, 160);
2882     __ lvx             (vTmp1, keypos, key);
2883     __ vec_perm        (vKey3, vTmp1, vKey3, keyPerm);
2884 
2885     // 1st - 3rd rounds
2886     __ vxor            (vRet, vRet, vKey1);
2887     __ vncipher        (vRet, vRet, vKey2);
2888     __ vncipher        (vRet, vRet, vKey3);
2889 
2890     __ b               (L_doLast);
2891 
2892     __ bind            (L_do44);
2893 
2894     // load the 11th round key to vKey1
2895     __ li              (keypos, 176);
2896     __ lvx             (vKey1, keypos, key);
2897     __ li              (keypos, 160);
2898     __ lvx             (vTmp1, keypos, key);
2899     __ vec_perm        (vKey1, vTmp1, vKey1, keyPerm);
2900 
2901     // 1st round
2902     __ vxor            (vRet, vRet, vKey1);
2903 
2904     __ bind            (L_doLast);
2905 
2906     // load the 10th round key to vKey1
2907     __ li              (keypos, 144);
2908     __ lvx             (vKey2, keypos, key);
2909     __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
2910 
2911     // load the 9th round key to vKey2
2912     __ li              (keypos, 128);
2913     __ lvx             (vKey3, keypos, key);
2914     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2915 
2916     // load the 8th round key to vKey3
2917     __ li              (keypos, 112);
2918     __ lvx             (vKey4, keypos, key);
2919     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
2920 
2921     // load the 7th round key to vKey4
2922     __ li              (keypos, 96);
2923     __ lvx             (vKey5, keypos, key);
2924     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
2925 
2926     // load the 6th round key to vKey5
2927     __ li              (keypos, 80);
2928     __ lvx             (vTmp1, keypos, key);
2929     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
2930 
2931     // last 10th - 6th rounds
2932     __ vncipher        (vRet, vRet, vKey1);
2933     __ vncipher        (vRet, vRet, vKey2);
2934     __ vncipher        (vRet, vRet, vKey3);
2935     __ vncipher        (vRet, vRet, vKey4);
2936     __ vncipher        (vRet, vRet, vKey5);
2937 
2938     // load the 5th round key to vKey1
2939     __ li              (keypos, 64);
2940     __ lvx             (vKey2, keypos, key);
2941     __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
2942 
2943     // load the 4th round key to vKey2
2944     __ li              (keypos, 48);
2945     __ lvx             (vKey3, keypos, key);
2946     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2947 
2948     // load the 3rd round key to vKey3
2949     __ li              (keypos, 32);
2950     __ lvx             (vKey4, keypos, key);
2951     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
2952 
2953     // load the 2nd round key to vKey4
2954     __ li              (keypos, 16);
2955     __ lvx             (vKey5, keypos, key);
2956     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
2957 
2958     // load the 1st round key to vKey5
2959     __ lvx             (vTmp1, key);
2960     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
2961 
2962     // last 5th - 1th rounds
2963     __ vncipher        (vRet, vRet, vKey1);
2964     __ vncipher        (vRet, vRet, vKey2);
2965     __ vncipher        (vRet, vRet, vKey3);
2966     __ vncipher        (vRet, vRet, vKey4);
2967     __ vncipherlast    (vRet, vRet, vKey5);
2968 
2969     // store result (unaligned)
2970 #ifdef VM_LITTLE_ENDIAN
2971     __ lvsl            (toPerm, to);
2972 #else
2973     __ lvsr            (toPerm, to);
2974 #endif
2975     __ vspltisb        (vTmp3, -1);
2976     __ vspltisb        (vTmp4, 0);
2977     __ lvx             (vTmp1, to);
2978     __ lvx             (vTmp2, fifteen, to);
2979 #ifdef VM_LITTLE_ENDIAN
2980     __ vperm           (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
2981     __ vxor            (toPerm, toPerm, fSplt);       // swap bytes
2982 #else
2983     __ vperm           (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
2984 #endif
2985     __ vperm           (vTmp4, vRet, vRet, toPerm);   // rotate data
2986     __ vsel            (vTmp2, vTmp4, vTmp2, vTmp3);
2987     __ vsel            (vTmp1, vTmp1, vTmp4, vTmp3);
2988     __ stvx            (vTmp2, fifteen, to);          // store this one first (may alias)
2989     __ stvx            (vTmp1, to);
2990 
2991     __ blr();
2992      return start;
2993   }
2994 
2995   address generate_sha256_implCompress(bool multi_block, const char *name) {
2996     assert(UseSHA, "need SHA instructions");
2997     StubCodeMark mark(this, "StubRoutines", name);
2998     address start = __ function_entry();
2999 
3000     __ sha256 (multi_block);
3001 
3002     __ blr();
3003     return start;
3004   }
3005 
3006   address generate_sha512_implCompress(bool multi_block, const char *name) {
3007     assert(UseSHA, "need SHA instructions");
3008     StubCodeMark mark(this, "StubRoutines", name);
3009     address start = __ function_entry();
3010 
3011     __ sha512 (multi_block);
3012 
3013     __ blr();
3014     return start;
3015   }
3016 
3017   void generate_arraycopy_stubs() {
3018     // Note: the disjoint stubs must be generated first, some of
3019     // the conjoint stubs use them.
3020 
3021     // non-aligned disjoint versions
3022     StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
3023     StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
3024     StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
3025     StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
3026     StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
3027     StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
3028 
3029     // aligned disjoint versions
3030     StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
3031     StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
3032     StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
3033     StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
3034     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
3035     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "oop_disjoint_arraycopy_uninit", true);
3036 
3037     // non-aligned conjoint versions
3038     StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
3039     StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
3040     StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
3041     StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
3042     StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
3043     StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
3044 
3045     // aligned conjoint versions
3046     StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
3047     StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
3048     StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
3049     StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
3050     StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
3051     StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", true);
3052 
3053     // special/generic versions
3054     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", false);
3055     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true);
3056 
3057     StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
3058                                                             STUB_ENTRY(jbyte_arraycopy),
3059                                                             STUB_ENTRY(jshort_arraycopy),
3060                                                             STUB_ENTRY(jint_arraycopy),
3061                                                             STUB_ENTRY(jlong_arraycopy));
3062     StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
3063                                                              STUB_ENTRY(jbyte_arraycopy),
3064                                                              STUB_ENTRY(jshort_arraycopy),
3065                                                              STUB_ENTRY(jint_arraycopy),
3066                                                              STUB_ENTRY(oop_arraycopy),
3067                                                              STUB_ENTRY(oop_disjoint_arraycopy),
3068                                                              STUB_ENTRY(jlong_arraycopy),
3069                                                              STUB_ENTRY(checkcast_arraycopy));
3070 
3071     // fill routines
3072     if (OptimizeFill) {
3073       StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
3074       StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
3075       StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
3076       StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
3077       StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3078       StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
3079     }
3080   }
3081 
3082   // Safefetch stubs.
3083   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
3084     // safefetch signatures:
3085     //   int      SafeFetch32(int*      adr, int      errValue);
3086     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3087     //
3088     // arguments:
3089     //   R3_ARG1 = adr
3090     //   R4_ARG2 = errValue
3091     //
3092     // result:
3093     //   R3_RET  = *adr or errValue
3094 
3095     StubCodeMark mark(this, "StubRoutines", name);
3096 
3097     // Entry point, pc or function descriptor.
3098     *entry = __ function_entry();
3099 
3100     // Load *adr into R4_ARG2, may fault.
3101     *fault_pc = __ pc();
3102     switch (size) {
3103       case 4:
3104         // int32_t, signed extended
3105         __ lwa(R4_ARG2, 0, R3_ARG1);
3106         break;
3107       case 8:
3108         // int64_t
3109         __ ld(R4_ARG2, 0, R3_ARG1);
3110         break;
3111       default:
3112         ShouldNotReachHere();
3113     }
3114 
3115     // return errValue or *adr
3116     *continuation_pc = __ pc();
3117     __ mr(R3_RET, R4_ARG2);
3118     __ blr();
3119   }
3120 
3121   // Stub for BigInteger::multiplyToLen()
3122   //
3123   //  Arguments:
3124   //
3125   //  Input:
3126   //    R3 - x address
3127   //    R4 - x length
3128   //    R5 - y address
3129   //    R6 - y length
3130   //    R7 - z address
3131   //    R8 - z length
3132   //
3133   address generate_multiplyToLen() {
3134 
3135     StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
3136 
3137     address start = __ function_entry();
3138 
3139     const Register x     = R3;
3140     const Register xlen  = R4;
3141     const Register y     = R5;
3142     const Register ylen  = R6;
3143     const Register z     = R7;
3144     const Register zlen  = R8;
3145 
3146     const Register tmp1  = R2; // TOC not used.
3147     const Register tmp2  = R9;
3148     const Register tmp3  = R10;
3149     const Register tmp4  = R11;
3150     const Register tmp5  = R12;
3151 
3152     // non-volatile regs
3153     const Register tmp6  = R31;
3154     const Register tmp7  = R30;
3155     const Register tmp8  = R29;
3156     const Register tmp9  = R28;
3157     const Register tmp10 = R27;
3158     const Register tmp11 = R26;
3159     const Register tmp12 = R25;
3160     const Register tmp13 = R24;
3161 
3162     BLOCK_COMMENT("Entry:");
3163 
3164     // C2 does not respect int to long conversion for stub calls.
3165     __ clrldi(xlen, xlen, 32);
3166     __ clrldi(ylen, ylen, 32);
3167     __ clrldi(zlen, zlen, 32);
3168 
3169     // Save non-volatile regs (frameless).
3170     int current_offs = 8;
3171     __ std(R24, -current_offs, R1_SP); current_offs += 8;
3172     __ std(R25, -current_offs, R1_SP); current_offs += 8;
3173     __ std(R26, -current_offs, R1_SP); current_offs += 8;
3174     __ std(R27, -current_offs, R1_SP); current_offs += 8;
3175     __ std(R28, -current_offs, R1_SP); current_offs += 8;
3176     __ std(R29, -current_offs, R1_SP); current_offs += 8;
3177     __ std(R30, -current_offs, R1_SP); current_offs += 8;
3178     __ std(R31, -current_offs, R1_SP);
3179 
3180     __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5,
3181                        tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13);
3182 
3183     // Restore non-volatile regs.
3184     current_offs = 8;
3185     __ ld(R24, -current_offs, R1_SP); current_offs += 8;
3186     __ ld(R25, -current_offs, R1_SP); current_offs += 8;
3187     __ ld(R26, -current_offs, R1_SP); current_offs += 8;
3188     __ ld(R27, -current_offs, R1_SP); current_offs += 8;
3189     __ ld(R28, -current_offs, R1_SP); current_offs += 8;
3190     __ ld(R29, -current_offs, R1_SP); current_offs += 8;
3191     __ ld(R30, -current_offs, R1_SP); current_offs += 8;
3192     __ ld(R31, -current_offs, R1_SP);
3193 
3194     __ blr();  // Return to caller.
3195 
3196     return start;
3197   }
3198 
3199 
3200   // Compute CRC32/CRC32C function.
3201   void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
3202 
3203       // arguments to kernel_crc32:
3204       const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3205       const Register data    = R4_ARG2;  // source byte array
3206       const Register dataLen = R5_ARG3;  // #bytes to process
3207 
3208       const Register t0      = R2;
3209       const Register t1      = R7;
3210       const Register t2      = R8;
3211       const Register t3      = R9;
3212       const Register tc0     = R10;
3213       const Register tc1     = R11;
3214       const Register tc2     = R12;
3215 
3216       BLOCK_COMMENT("Stub body {");
3217       assert_different_registers(crc, data, dataLen, table);
3218 
3219       __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
3220 
3221       BLOCK_COMMENT("return");
3222       __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3223       __ blr();
3224 
3225       BLOCK_COMMENT("} Stub body");
3226   }
3227 
3228   /**
3229   *  Arguments:
3230   *
3231   *  Input:
3232   *   R3_ARG1    - out address
3233   *   R4_ARG2    - in address
3234   *   R5_ARG3    - offset
3235   *   R6_ARG4    - len
3236   *   R7_ARG5    - k
3237   *  Output:
3238   *   R3_RET     - carry
3239   */
3240   address generate_mulAdd() {
3241     __ align(CodeEntryAlignment);
3242     StubCodeMark mark(this, "StubRoutines", "mulAdd");
3243 
3244     address start = __ function_entry();
3245 
3246     // C2 does not sign extend signed parameters to full 64 bits registers:
3247     __ rldic (R5_ARG3, R5_ARG3, 2, 32);  // always positive
3248     __ clrldi(R6_ARG4, R6_ARG4, 32);     // force zero bits on higher word
3249     __ clrldi(R7_ARG5, R7_ARG5, 32);     // force zero bits on higher word
3250 
3251     __ muladd(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4, R7_ARG5, R8, R9, R10);
3252 
3253     // Moves output carry to return register
3254     __ mr    (R3_RET,  R10);
3255 
3256     __ blr();
3257 
3258     return start;
3259   }
3260 
3261   /**
3262   *  Arguments:
3263   *
3264   *  Input:
3265   *   R3_ARG1    - in address
3266   *   R4_ARG2    - in length
3267   *   R5_ARG3    - out address
3268   *   R6_ARG4    - out length
3269   */
3270   address generate_squareToLen() {
3271     __ align(CodeEntryAlignment);
3272     StubCodeMark mark(this, "StubRoutines", "squareToLen");
3273 
3274     address start = __ function_entry();
3275 
3276     // args - higher word is cleaned (unsignedly) due to int to long casting
3277     const Register in        = R3_ARG1;
3278     const Register in_len    = R4_ARG2;
3279     __ clrldi(in_len, in_len, 32);
3280     const Register out       = R5_ARG3;
3281     const Register out_len   = R6_ARG4;
3282     __ clrldi(out_len, out_len, 32);
3283 
3284     // output
3285     const Register ret       = R3_RET;
3286 
3287     // temporaries
3288     const Register lplw_s    = R7;
3289     const Register in_aux    = R8;
3290     const Register out_aux   = R9;
3291     const Register piece     = R10;
3292     const Register product   = R14;
3293     const Register lplw      = R15;
3294     const Register i_minus1  = R16;
3295     const Register carry     = R17;
3296     const Register offset    = R18;
3297     const Register off_aux   = R19;
3298     const Register t         = R20;
3299     const Register mlen      = R21;
3300     const Register len       = R22;
3301     const Register a         = R23;
3302     const Register b         = R24;
3303     const Register i         = R25;
3304     const Register c         = R26;
3305     const Register cs        = R27;
3306 
3307     // Labels
3308     Label SKIP_LSHIFT, SKIP_DIAGONAL_SUM, SKIP_ADDONE, SKIP_MULADD, SKIP_LOOP_SQUARE;
3309     Label LOOP_LSHIFT, LOOP_DIAGONAL_SUM, LOOP_ADDONE, LOOP_MULADD, LOOP_SQUARE;
3310 
3311     // Save non-volatile regs (frameless).
3312     int current_offs = -8;
3313     __ std(R28, current_offs, R1_SP); current_offs -= 8;
3314     __ std(R27, current_offs, R1_SP); current_offs -= 8;
3315     __ std(R26, current_offs, R1_SP); current_offs -= 8;
3316     __ std(R25, current_offs, R1_SP); current_offs -= 8;
3317     __ std(R24, current_offs, R1_SP); current_offs -= 8;
3318     __ std(R23, current_offs, R1_SP); current_offs -= 8;
3319     __ std(R22, current_offs, R1_SP); current_offs -= 8;
3320     __ std(R21, current_offs, R1_SP); current_offs -= 8;
3321     __ std(R20, current_offs, R1_SP); current_offs -= 8;
3322     __ std(R19, current_offs, R1_SP); current_offs -= 8;
3323     __ std(R18, current_offs, R1_SP); current_offs -= 8;
3324     __ std(R17, current_offs, R1_SP); current_offs -= 8;
3325     __ std(R16, current_offs, R1_SP); current_offs -= 8;
3326     __ std(R15, current_offs, R1_SP); current_offs -= 8;
3327     __ std(R14, current_offs, R1_SP);
3328 
3329     // Store the squares, right shifted one bit (i.e., divided by 2)
3330     __ subi   (out_aux,   out,       8);
3331     __ subi   (in_aux,    in,        4);
3332     __ cmpwi  (CCR0,      in_len,    0);
3333     // Initialize lplw outside of the loop
3334     __ xorr   (lplw,      lplw,      lplw);
3335     __ ble    (CCR0,      SKIP_LOOP_SQUARE);    // in_len <= 0
3336     __ mtctr  (in_len);
3337 
3338     __ bind(LOOP_SQUARE);
3339     __ lwzu   (piece,     4,         in_aux);
3340     __ mulld  (product,   piece,     piece);
3341     // shift left 63 bits and only keep the MSB
3342     __ rldic  (lplw_s,    lplw,      63, 0);
3343     __ mr     (lplw,      product);
3344     // shift right 1 bit without sign extension
3345     __ srdi   (product,   product,   1);
3346     // join them to the same register and store it
3347     __ orr    (product,   lplw_s,    product);
3348 #ifdef VM_LITTLE_ENDIAN
3349     // Swap low and high words for little endian
3350     __ rldicl (product,   product,   32, 0);
3351 #endif
3352     __ stdu   (product,   8,         out_aux);
3353     __ bdnz   (LOOP_SQUARE);
3354 
3355     __ bind(SKIP_LOOP_SQUARE);
3356 
3357     // Add in off-diagonal sums
3358     __ cmpwi  (CCR0,      in_len,    0);
3359     __ ble    (CCR0,      SKIP_DIAGONAL_SUM);
3360     // Avoid CTR usage here in order to use it at mulAdd
3361     __ subi   (i_minus1,  in_len,    1);
3362     __ li     (offset,    4);
3363 
3364     __ bind(LOOP_DIAGONAL_SUM);
3365 
3366     __ sldi   (off_aux,   out_len,   2);
3367     __ sub    (off_aux,   off_aux,   offset);
3368 
3369     __ mr     (len,       i_minus1);
3370     __ sldi   (mlen,      i_minus1,  2);
3371     __ lwzx   (t,         in,        mlen);
3372 
3373     __ muladd (out, in, off_aux, len, t, a, b, carry);
3374 
3375     // begin<addOne>
3376     // off_aux = out_len*4 - 4 - mlen - offset*4 - 4;
3377     __ addi   (mlen,      mlen,      4);
3378     __ sldi   (a,         out_len,   2);
3379     __ subi   (a,         a,         4);
3380     __ sub    (a,         a,         mlen);
3381     __ subi   (off_aux,   offset,    4);
3382     __ sub    (off_aux,   a,         off_aux);
3383 
3384     __ lwzx   (b,         off_aux,   out);
3385     __ add    (b,         b,         carry);
3386     __ stwx   (b,         off_aux,   out);
3387 
3388     // if (((uint64_t)s >> 32) != 0) {
3389     __ srdi_  (a,         b,         32);
3390     __ beq    (CCR0,      SKIP_ADDONE);
3391 
3392     // while (--mlen >= 0) {
3393     __ bind(LOOP_ADDONE);
3394     __ subi   (mlen,      mlen,      4);
3395     __ cmpwi  (CCR0,      mlen,      0);
3396     __ beq    (CCR0,      SKIP_ADDONE);
3397 
3398     // if (--offset_aux < 0) { // Carry out of number
3399     __ subi   (off_aux,   off_aux,   4);
3400     __ cmpwi  (CCR0,      off_aux,   0);
3401     __ blt    (CCR0,      SKIP_ADDONE);
3402 
3403     // } else {
3404     __ lwzx   (b,         off_aux,   out);
3405     __ addi   (b,         b,         1);
3406     __ stwx   (b,         off_aux,   out);
3407     __ cmpwi  (CCR0,      b,         0);
3408     __ bne    (CCR0,      SKIP_ADDONE);
3409     __ b      (LOOP_ADDONE);
3410 
3411     __ bind(SKIP_ADDONE);
3412     // } } } end<addOne>
3413 
3414     __ addi   (offset,    offset,    8);
3415     __ subi   (i_minus1,  i_minus1,  1);
3416     __ cmpwi  (CCR0,      i_minus1,  0);
3417     __ bge    (CCR0,      LOOP_DIAGONAL_SUM);
3418 
3419     __ bind(SKIP_DIAGONAL_SUM);
3420 
3421     // Shift back up and set low bit
3422     // Shifts 1 bit left up to len positions. Assumes no leading zeros
3423     // begin<primitiveLeftShift>
3424     __ cmpwi  (CCR0,      out_len,   0);
3425     __ ble    (CCR0,      SKIP_LSHIFT);
3426     __ li     (i,         0);
3427     __ lwz    (c,         0,         out);
3428     __ subi   (b,         out_len,   1);
3429     __ mtctr  (b);
3430 
3431     __ bind(LOOP_LSHIFT);
3432     __ mr     (b,         c);
3433     __ addi   (cs,        i,         4);
3434     __ lwzx   (c,         out,       cs);
3435 
3436     __ sldi   (b,         b,         1);
3437     __ srwi   (cs,        c,         31);
3438     __ orr    (b,         b,         cs);
3439     __ stwx   (b,         i,         out);
3440 
3441     __ addi   (i,         i,         4);
3442     __ bdnz   (LOOP_LSHIFT);
3443 
3444     __ sldi   (c,         out_len,   2);
3445     __ subi   (c,         c,         4);
3446     __ lwzx   (b,         out,       c);
3447     __ sldi   (b,         b,         1);
3448     __ stwx   (b,         out,       c);
3449 
3450     __ bind(SKIP_LSHIFT);
3451     // end<primitiveLeftShift>
3452 
3453     // Set low bit
3454     __ sldi   (i,         in_len,    2);
3455     __ subi   (i,         i,         4);
3456     __ lwzx   (i,         in,        i);
3457     __ sldi   (c,         out_len,   2);
3458     __ subi   (c,         c,         4);
3459     __ lwzx   (b,         out,       c);
3460 
3461     __ andi   (i,         i,         1);
3462     __ orr    (i,         b,         i);
3463 
3464     __ stwx   (i,         out,       c);
3465 
3466     // Restore non-volatile regs.
3467     current_offs = -8;
3468     __ ld(R28, current_offs, R1_SP); current_offs -= 8;
3469     __ ld(R27, current_offs, R1_SP); current_offs -= 8;
3470     __ ld(R26, current_offs, R1_SP); current_offs -= 8;
3471     __ ld(R25, current_offs, R1_SP); current_offs -= 8;
3472     __ ld(R24, current_offs, R1_SP); current_offs -= 8;
3473     __ ld(R23, current_offs, R1_SP); current_offs -= 8;
3474     __ ld(R22, current_offs, R1_SP); current_offs -= 8;
3475     __ ld(R21, current_offs, R1_SP); current_offs -= 8;
3476     __ ld(R20, current_offs, R1_SP); current_offs -= 8;
3477     __ ld(R19, current_offs, R1_SP); current_offs -= 8;
3478     __ ld(R18, current_offs, R1_SP); current_offs -= 8;
3479     __ ld(R17, current_offs, R1_SP); current_offs -= 8;
3480     __ ld(R16, current_offs, R1_SP); current_offs -= 8;
3481     __ ld(R15, current_offs, R1_SP); current_offs -= 8;
3482     __ ld(R14, current_offs, R1_SP);
3483 
3484     __ mr(ret, out);
3485     __ blr();
3486 
3487     return start;
3488   }
3489 
3490   /**
3491    * Arguments:
3492    *
3493    * Inputs:
3494    *   R3_ARG1    - int   crc
3495    *   R4_ARG2    - byte* buf
3496    *   R5_ARG3    - int   length (of buffer)
3497    *
3498    * scratch:
3499    *   R2, R6-R12
3500    *
3501    * Ouput:
3502    *   R3_RET     - int   crc result
3503    */
3504   // Compute CRC32 function.
3505   address generate_CRC32_updateBytes(const char* name) {
3506     __ align(CodeEntryAlignment);
3507     StubCodeMark mark(this, "StubRoutines", name);
3508     address start = __ function_entry();  // Remember stub start address (is rtn value).
3509 
3510     const Register table   = R6;       // crc table address
3511 
3512 #ifdef VM_LITTLE_ENDIAN
3513     // arguments to kernel_crc32:
3514     const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3515     const Register data    = R4_ARG2;  // source byte array
3516     const Register dataLen = R5_ARG3;  // #bytes to process
3517 
3518     if (VM_Version::has_vpmsumb()) {
3519       const Register constants    = R2;  // constants address
3520       const Register bconstants   = R8;  // barret table address
3521 
3522       const Register t0      = R9;
3523       const Register t1      = R10;
3524       const Register t2      = R11;
3525       const Register t3      = R12;
3526       const Register t4      = R7;
3527 
3528       BLOCK_COMMENT("Stub body {");
3529       assert_different_registers(crc, data, dataLen, table);
3530 
3531       StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
3532       StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
3533       StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
3534 
3535       __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
3536 
3537       BLOCK_COMMENT("return");
3538       __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3539       __ blr();
3540 
3541       BLOCK_COMMENT("} Stub body");
3542     } else
3543 #endif
3544     {
3545       StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
3546       generate_CRC_updateBytes(name, table, true);
3547     }
3548 
3549     return start;
3550   }
3551 
3552 
3553   /**
3554    * Arguments:
3555    *
3556    * Inputs:
3557    *   R3_ARG1    - int   crc
3558    *   R4_ARG2    - byte* buf
3559    *   R5_ARG3    - int   length (of buffer)
3560    *
3561    * scratch:
3562    *   R2, R6-R12
3563    *
3564    * Ouput:
3565    *   R3_RET     - int   crc result
3566    */
3567   // Compute CRC32C function.
3568   address generate_CRC32C_updateBytes(const char* name) {
3569     __ align(CodeEntryAlignment);
3570     StubCodeMark mark(this, "StubRoutines", name);
3571     address start = __ function_entry();  // Remember stub start address (is rtn value).
3572 
3573     const Register table   = R6;       // crc table address
3574 
3575 #if 0   // no vector support yet for CRC32C
3576 #ifdef VM_LITTLE_ENDIAN
3577     // arguments to kernel_crc32:
3578     const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3579     const Register data    = R4_ARG2;  // source byte array
3580     const Register dataLen = R5_ARG3;  // #bytes to process
3581 
3582     if (VM_Version::has_vpmsumb()) {
3583       const Register constants    = R2;  // constants address
3584       const Register bconstants   = R8;  // barret table address
3585 
3586       const Register t0      = R9;
3587       const Register t1      = R10;
3588       const Register t2      = R11;
3589       const Register t3      = R12;
3590       const Register t4      = R7;
3591 
3592       BLOCK_COMMENT("Stub body {");
3593       assert_different_registers(crc, data, dataLen, table);
3594 
3595       StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
3596       StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
3597       StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
3598 
3599       __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
3600 
3601       BLOCK_COMMENT("return");
3602       __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3603       __ blr();
3604 
3605       BLOCK_COMMENT("} Stub body");
3606     } else
3607 #endif
3608 #endif
3609     {
3610       StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
3611       generate_CRC_updateBytes(name, table, false);
3612     }
3613 
3614     return start;
3615   }
3616 
3617 
3618   // Initialization
3619   void generate_initial() {
3620     // Generates all stubs and initializes the entry points
3621 
3622     // Entry points that exist in all platforms.
3623     // Note: This is code that could be shared among different platforms - however the
3624     // benefit seems to be smaller than the disadvantage of having a
3625     // much more complicated generator structure. See also comment in
3626     // stubRoutines.hpp.
3627 
3628     StubRoutines::_forward_exception_entry          = generate_forward_exception();
3629     StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
3630     StubRoutines::_catch_exception_entry            = generate_catch_exception();
3631 
3632     // Build this early so it's available for the interpreter.
3633     StubRoutines::_throw_StackOverflowError_entry   =
3634       generate_throw_exception("StackOverflowError throw_exception",
3635                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
3636     StubRoutines::_throw_delayed_StackOverflowError_entry =
3637       generate_throw_exception("delayed StackOverflowError throw_exception",
3638                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);
3639 
3640     // CRC32 Intrinsics.
3641     if (UseCRC32Intrinsics) {
3642       StubRoutines::_crc_table_adr    = (address)StubRoutines::ppc64::_crc_table;
3643       StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
3644     }
3645 
3646     // CRC32C Intrinsics.
3647     if (UseCRC32CIntrinsics) {
3648       StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
3649       StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
3650     }
3651   }
3652 
3653   void generate_all() {
3654     // Generates all stubs and initializes the entry points
3655 
3656     // These entry points require SharedInfo::stack0 to be set up in
3657     // non-core builds
3658     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
3659     // Handle IncompatibleClassChangeError in itable stubs.
3660     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
3661     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
3662 
3663     // support for verify_oop (must happen after universe_init)
3664     StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
3665 
3666     // arraycopy stubs used by compilers
3667     generate_arraycopy_stubs();
3668 
3669     // Safefetch stubs.
3670     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
3671                                                        &StubRoutines::_safefetch32_fault_pc,
3672                                                        &StubRoutines::_safefetch32_continuation_pc);
3673     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
3674                                                        &StubRoutines::_safefetchN_fault_pc,
3675                                                        &StubRoutines::_safefetchN_continuation_pc);
3676 
3677 #ifdef COMPILER2
3678     if (UseMultiplyToLenIntrinsic) {
3679       StubRoutines::_multiplyToLen = generate_multiplyToLen();
3680     }
3681 #endif
3682 
3683     if (UseSquareToLenIntrinsic) {
3684       StubRoutines::_squareToLen = generate_squareToLen();
3685     }
3686     if (UseMulAddIntrinsic) {
3687       StubRoutines::_mulAdd = generate_mulAdd();
3688     }
3689     if (UseMontgomeryMultiplyIntrinsic) {
3690       StubRoutines::_montgomeryMultiply
3691         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
3692     }
3693     if (UseMontgomerySquareIntrinsic) {
3694       StubRoutines::_montgomerySquare
3695         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
3696     }
3697 
3698     if (UseAESIntrinsics) {
3699       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
3700       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
3701     }
3702 
3703     if (UseSHA256Intrinsics) {
3704       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
3705       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
3706     }
3707     if (UseSHA512Intrinsics) {
3708       StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
3709       StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
3710     }
3711   }
3712 
3713  public:
3714   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3715     // replace the standard masm with a special one:
3716     _masm = new MacroAssembler(code);
3717     if (all) {
3718       generate_all();
3719     } else {
3720       generate_initial();
3721     }
3722   }
3723 };
3724 
3725 void StubGenerator_generate(CodeBuffer* code, bool all) {
3726   StubGenerator g(code, all);
3727 }