1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "nativeInst_ppc.hpp"
  30 #include "oops/instanceOop.hpp"
  31 #include "oops/method.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "prims/methodHandles.hpp"
  35 #include "runtime/frame.inline.hpp"
  36 #include "runtime/handles.inline.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubCodeGenerator.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/top.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 
  43 #define __ _masm->
  44 
  45 #ifdef PRODUCT
  46 #define BLOCK_COMMENT(str) // nothing
  47 #else
  48 #define BLOCK_COMMENT(str) __ block_comment(str)
  49 #endif
  50 
  51 class StubGenerator: public StubCodeGenerator {
  52  private:
  53 
  54   // Call stubs are used to call Java from C
  55   //
  56   // Arguments:
  57   //
  58   //   R3  - call wrapper address     : address
  59   //   R4  - result                   : intptr_t*
  60   //   R5  - result type              : BasicType
  61   //   R6  - method                   : Method
  62   //   R7  - frame mgr entry point    : address
  63   //   R8  - parameter block          : intptr_t*
  64   //   R9  - parameter count in words : int
  65   //   R10 - thread                   : Thread*
  66   //
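  // Seen from the C side, the stub behaves roughly like the function below.
  // This is an illustrative sketch only; the parameter names are made up here
  // (cf. the CallStub typedef used by the call wrapper):
  //
  //   void call_stub(address call_wrapper, intptr_t* result, BasicType result_type,
  //                  Method* method, address entry_point, intptr_t* parameters,
  //                  int parameter_words, Thread* thread);
  //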
  67   address generate_call_stub(address& return_address) {
  68     // Setup a new c frame, copy java arguments, call frame manager or
  69     // native_entry, and process result.
  70 
  71     StubCodeMark mark(this, "StubRoutines", "call_stub");
  72 
  73     address start = __ function_entry();
  74 
  75     // some sanity checks
  76     assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
  77     assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
  78     assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
  79     assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
  80     assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");
  81 
  82     Register r_arg_call_wrapper_addr        = R3;
  83     Register r_arg_result_addr              = R4;
  84     Register r_arg_result_type              = R5;
  85     Register r_arg_method                   = R6;
  86     Register r_arg_entry                    = R7;
  87     Register r_arg_thread                   = R10;
  88 
  89     Register r_temp                         = R24;
  90     Register r_top_of_arguments_addr        = R25;
  91     Register r_entryframe_fp                = R26;
  92 
  93     {
  94       // Stack on entry to call_stub:
  95       //
  96       //      F1      [C_FRAME]
  97       //              ...
  98 
  99       Register r_arg_argument_addr          = R8;
 100       Register r_arg_argument_count         = R9;
 101       Register r_frame_alignment_in_bytes   = R27;
 102       Register r_argument_addr              = R28;
 103       Register r_argumentcopy_addr          = R29;
 104       Register r_argument_size_in_bytes     = R30;
 105       Register r_frame_size                 = R23;
 106 
 107       Label arguments_copied;
 108 
 109       // Save LR/CR to caller's C_FRAME.
 110       __ save_LR_CR(R0);
 111 
 112       // Zero extend arg_argument_count.
 113       __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);
 114 
 115       // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
 116       __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
 117 
 118       // Keep copy of our frame pointer (caller's SP).
 119       __ mr(r_entryframe_fp, R1_SP);
 120 
 121       BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
 122       // Push ENTRY_FRAME including arguments:
 123       //
 124       //      F0      [TOP_IJAVA_FRAME_ABI]
 125       //              alignment (optional)
 126       //              [outgoing Java arguments]
 127       //              [ENTRY_FRAME_LOCALS]
 128       //      F1      [C_FRAME]
 129       //              ...
 130 
 131       // calculate frame size
 132 
 133       // unaligned size of arguments
 134       __ sldi(r_argument_size_in_bytes,
 135                   r_arg_argument_count, Interpreter::logStackElementSize);
 136       // arguments alignment (max 1 slot)
 137       // FIXME: use round_to() here
 138       __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
 139       __ sldi(r_frame_alignment_in_bytes,
 140               r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
 141 
 142       // size = unaligned size of arguments + top abi's size
 143       __ addi(r_frame_size, r_argument_size_in_bytes,
 144               frame::top_ijava_frame_abi_size);
 145       // size += arguments alignment
 146       __ add(r_frame_size,
 147              r_frame_size, r_frame_alignment_in_bytes);
 148       // size += size of call_stub locals
 149       __ addi(r_frame_size,
 150               r_frame_size, frame::entry_frame_locals_size);
 151 
 152       // push ENTRY_FRAME
 153       __ push_frame(r_frame_size, r_temp);
 154 
 155       // initialize call_stub locals (step 1)
 156       __ std(r_arg_call_wrapper_addr,
 157              _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
 158       __ std(r_arg_result_addr,
 159              _entry_frame_locals_neg(result_address), r_entryframe_fp);
 160       __ std(r_arg_result_type,
 161              _entry_frame_locals_neg(result_type), r_entryframe_fp);
 162       // we will save arguments_tos_address later
 163 
 164 
 165       BLOCK_COMMENT("Copy Java arguments");
 166       // copy Java arguments
 167 
 168       // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
 169       // FIXME: why not simply use SP+frame::top_ijava_frame_size?
 170       __ addi(r_top_of_arguments_addr,
 171               R1_SP, frame::top_ijava_frame_abi_size);
 172       __ add(r_top_of_arguments_addr,
 173              r_top_of_arguments_addr, r_frame_alignment_in_bytes);
 174 
 175       // any arguments to copy?
 176       __ cmpdi(CCR0, r_arg_argument_count, 0);
 177       __ beq(CCR0, arguments_copied);
 178 
 179       // prepare loop and copy arguments in reverse order
 180       {
 181         // init CTR with arg_argument_count
 182         __ mtctr(r_arg_argument_count);
 183 
 184         // let r_argumentcopy_addr point to last outgoing Java arguments
 185         __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);
 186 
 187         // let r_argument_addr point to last incoming java argument
 188         __ add(r_argument_addr,
 189                    r_arg_argument_addr, r_argument_size_in_bytes);
 190         __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
 191 
 192         // now loop while CTR > 0 and copy arguments
 193         {
 194           Label next_argument;
 195           __ bind(next_argument);
 196 
 197           __ ld(r_temp, 0, r_argument_addr);
 198           // argument_addr--;
 199           __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
 200           __ std(r_temp, 0, r_argumentcopy_addr);
 201           // argumentcopy_addr++;
 202           __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);
 203 
 204           __ bdnz(next_argument);
 205         }
 206       }
 207 
 208       // Arguments copied, continue.
 209       __ bind(arguments_copied);
 210     }
 211 
 212     {
 213       BLOCK_COMMENT("Call frame manager or native entry.");
 214       // Call frame manager or native entry.
 215       Register r_new_arg_entry = R14;
 216       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
 217                                  r_arg_method, r_arg_thread);
 218 
 219       __ mr(r_new_arg_entry, r_arg_entry);
 220 
 221       // Register state on entry to frame manager / native entry:
 222       //
 223       //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
 224       //   R19_method  -  Method
 225       //   R16_thread  -  JavaThread*
 226 
 227       // Tos must point to last argument - element_size.
 228 #ifdef CC_INTERP
 229       const Register tos = R17_tos;
 230 #else
 231       const Register tos = R15_esp;
 232 #endif
 233       __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
 234 
 235       // initialize call_stub locals (step 2)
 236       // now save tos as arguments_tos_address
 237       __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);
 238 
 239       // load argument registers for call
 240       __ mr(R19_method, r_arg_method);
 241       __ mr(R16_thread, r_arg_thread);
 242       assert(tos != r_arg_method, "trashed r_arg_method");
 243       assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
 244 
 245       // Set R15_prev_state to 0 for simplifying checks in callee.
 246 #ifdef CC_INTERP
 247       __ li(R15_prev_state, 0);
 248 #else
 249       __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
 250 #endif
 251       // Stack on entry to frame manager / native entry:
 252       //
 253       //      F0      [TOP_IJAVA_FRAME_ABI]
 254       //              alignment (optional)
 255       //              [outgoing Java arguments]
 256       //              [ENTRY_FRAME_LOCALS]
 257       //      F1      [C_FRAME]
 258       //              ...
 259       //
 260 
 261       // global toc register
 262       __ load_const(R29, MacroAssembler::global_toc(), R11_scratch1);
 263 
 264       // Load narrow oop base.
 265       __ reinit_heapbase(R30, R11_scratch1);
 266 
 267       // Remember the senderSP so the interpreter can pop c2i arguments off of the stack
 268       // when called via a c2i.
 269 
 270       // Pass initial_caller_sp to framemanager.
 271       __ mr(R21_tmp1, R1_SP);
 272 
 273       // Do a light-weight C-call here, r_new_arg_entry holds the address
 274       // of the interpreter entry point (frame manager or native entry)
 275       // and save runtime-value of LR in return_address.
 276       assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
 277              "trashed r_new_arg_entry");
 278       return_address = __ call_stub(r_new_arg_entry);
 279     }
 280 
 281     {
 282       BLOCK_COMMENT("Returned from frame manager or native entry.");
 283       // Returned from frame manager or native entry.
 284       // Now pop frame, process result, and return to caller.
 285 
 286       // Stack on exit from frame manager / native entry:
 287       //
 288       //      F0      [ABI]
 289       //              ...
 290       //              [ENTRY_FRAME_LOCALS]
 291       //      F1      [C_FRAME]
 292       //              ...
 293       //
 294       // Just pop the topmost frame ...
 295       //
 296 
 297       Label ret_is_object;
 298       Label ret_is_long;
 299       Label ret_is_float;
 300       Label ret_is_double;
 301 
 302       Register r_entryframe_fp = R30;
 303       Register r_lr            = R7_ARG5;
 304       Register r_cr            = R8_ARG6;
 305 
 306       // Reload some volatile registers which we've spilled before the call
 307       // to frame manager / native entry.
 308       // Access all locals via frame pointer, because we know nothing about
 309       // the topmost frame's size.
 310       __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
 311       assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
 312       __ ld(r_arg_result_addr,
 313             _entry_frame_locals_neg(result_address), r_entryframe_fp);
 314       __ ld(r_arg_result_type,
 315             _entry_frame_locals_neg(result_type), r_entryframe_fp);
 316       __ ld(r_cr, _abi(cr), r_entryframe_fp);
 317       __ ld(r_lr, _abi(lr), r_entryframe_fp);
 318 
 319       // pop frame and restore non-volatiles, LR and CR
 320       __ mr(R1_SP, r_entryframe_fp);
 321       __ mtcr(r_cr);
 322       __ mtlr(r_lr);
 323 
 324       // Store result depending on type. Everything that is not
 325       // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
 326       __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
 327       __ cmpwi(CCR1, r_arg_result_type, T_LONG);
 328       __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
 329       __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
 330 
 331       // restore non-volatile registers
 332       __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
 333 
 334 
 335       // Stack on exit from call_stub:
 336       //
 337       //      0       [C_FRAME]
 338       //              ...
 339       //
 340       //  no call_stub frames left.
 341 
 342       // All non-volatiles have been restored at this point!!
 343       assert(R3_RET == R3, "R3_RET should be R3");
 344 
 345       __ beq(CCR0, ret_is_object);
 346       __ beq(CCR1, ret_is_long);
 347       __ beq(CCR5, ret_is_float);
 348       __ beq(CCR6, ret_is_double);
 349 
 350       // default:
 351       __ stw(R3_RET, 0, r_arg_result_addr);
 352       __ blr(); // return to caller
 353 
 354       // case T_OBJECT:
 355       __ bind(ret_is_object);
 356       __ std(R3_RET, 0, r_arg_result_addr);
 357       __ blr(); // return to caller
 358 
 359       // case T_LONG:
 360       __ bind(ret_is_long);
 361       __ std(R3_RET, 0, r_arg_result_addr);
 362       __ blr(); // return to caller
 363 
 364       // case T_FLOAT:
 365       __ bind(ret_is_float);
 366       __ stfs(F1_RET, 0, r_arg_result_addr);
 367       __ blr(); // return to caller
 368 
 369       // case T_DOUBLE:
 370       __ bind(ret_is_double);
 371       __ stfd(F1_RET, 0, r_arg_result_addr);
 372       __ blr(); // return to caller
 373     }
 374 
 375     return start;
 376   }
 377 
 378   // Return point for a Java call if there's an exception thrown in
 379   // Java code.  The exception is caught and transformed into a
 380   // pending exception stored in JavaThread that can be tested from
 381   // within the VM.
 382   //
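  // In effect the generated code does the equivalent of (illustrative pseudo-C,
  // not the literal instruction sequence):
  //
  //   thread->set_pending_exception(exception_oop, __FILE__, __LINE__);
  //   goto *return_address_in_call_stub;   // passed in R4_ARG2
  //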
 383   address generate_catch_exception() {
 384     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 385 
 386     address start = __ pc();
 387 
 388     // Registers alive
 389     //
 390     //  R16_thread
 391     //  R3_ARG1 - address of pending exception
 392     //  R4_ARG2 - return address in call stub
 393 
 394     const Register exception_file = R21_tmp1;
 395     const Register exception_line = R22_tmp2;
 396 
 397     __ load_const(exception_file, (void*)__FILE__);
 398     __ load_const(exception_line, (void*)__LINE__);
 399 
 400     __ std(R3_ARG1, thread_(pending_exception));
 401     // store into `char *'
 402     __ std(exception_file, thread_(exception_file));
 403     // store into `int'
 404     __ stw(exception_line, thread_(exception_line));
 405 
 406     // complete return to VM
 407     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
 408 
 409     __ mtlr(R4_ARG2);
 410     // continue in call stub
 411     __ blr();
 412 
 413     return start;
 414   }
 415 
 416   // Continuation point for runtime calls returning with a pending
 417   // exception.  The pending exception check happened in the runtime
 418   // or native call stub.  The pending exception in Thread is
 419   // converted into a Java-level exception.
 420   //
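  // Sketch of what the generated code does (illustrative pseudo-C only):
  //
  //   address handler = SharedRuntime::exception_handler_for_return_address(thread, return_pc);
  //   R3_ARG1 = thread->pending_exception();   // exception oop
  //   thread->clear_pending_exception();       // done below by a plain store of 0
  //   R4_ARG2 = return_pc;                     // exception pc (the caller's LR)
  //   goto *handler;
  //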
 421   address generate_forward_exception() {
 422     StubCodeMark mark(this, "StubRoutines", "forward_exception");
 423     address start = __ pc();
 424 
 425 #if !defined(PRODUCT)
 426     if (VerifyOops) {
 427       // Get pending exception oop.
 428       __ ld(R3_ARG1,
 429                 in_bytes(Thread::pending_exception_offset()),
 430                 R16_thread);
 431       // Make sure that this code is only executed if there is a pending exception.
 432       {
 433         Label L;
 434         __ cmpdi(CCR0, R3_ARG1, 0);
 435         __ bne(CCR0, L);
 436         __ stop("StubRoutines::forward exception: no pending exception (1)");
 437         __ bind(L);
 438       }
 439       __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
 440     }
 441 #endif
 442 
 443     // Save LR/CR and copy exception pc (LR) into R4_ARG2.
 444     __ save_LR_CR(R4_ARG2);
 445     __ push_frame_reg_args(0, R0);
 446     // Find exception handler.
 447     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
 448                      SharedRuntime::exception_handler_for_return_address),
 449                     R16_thread,
 450                     R4_ARG2);
 451     // Copy handler's address.
 452     __ mtctr(R3_RET);
 453     __ pop_frame();
 454     __ restore_LR_CR(R0);
 455 
 456     // Set up the arguments for the exception handler:
 457     //  - R3_ARG1: exception oop
 458     //  - R4_ARG2: exception pc.
 459 
 460     // Load pending exception oop.
 461     __ ld(R3_ARG1,
 462               in_bytes(Thread::pending_exception_offset()),
 463               R16_thread);
 464 
 465     // The exception pc is the return address in the caller.
 466     // Must load it into R4_ARG2.
 467     __ mflr(R4_ARG2);
 468 
 469 #ifdef ASSERT
 470     // Make sure exception is set.
 471     {
 472       Label L;
 473       __ cmpdi(CCR0, R3_ARG1, 0);
 474       __ bne(CCR0, L);
 475       __ stop("StubRoutines::forward exception: no pending exception (2)");
 476       __ bind(L);
 477     }
 478 #endif
 479 
 480     // Clear the pending exception.
 481     __ li(R0, 0);
 482     __ std(R0,
 483                in_bytes(Thread::pending_exception_offset()),
 484                R16_thread);
 485     // Jump to exception handler.
 486     __ bctr();
 487 
 488     return start;
 489   }
 490 
 491 #undef __
 492 #define __ masm->
 493   // Continuation point for throwing of implicit exceptions that are
 494   // not handled in the current activation. Fabricates an exception
 495   // oop and initiates normal exception dispatching in this
 496   // frame. Only callee-saved registers are preserved (through the
 497   // normal register window / RegisterMap handling).  If the compiler
 498   // needs all registers to be preserved between the fault point and
 499   // the exception handler then it must assume responsibility for that
 500   // in AbstractCompiler::continuation_for_implicit_null_exception or
 501   // continuation_for_implicit_division_by_zero_exception. All other
 502   // implicit exceptions (e.g., NullPointerException or
 503   // AbstractMethodError on entry) are either at call sites or
 504   // otherwise assume that stack unwinding will be initiated, so
 505   // caller saved registers were assumed volatile in the compiler.
 506   //
 507   // Note that we generate only this stub into a RuntimeStub, because
 508   // it needs to be properly traversed and ignored during GC, so we
 509   // change the meaning of the "__" macro within this method.
 510   //
 511   // Note: the routine set_pc_not_at_call_for_caller in
 512   // SharedRuntime.cpp requires that this code be generated into a
 513   // RuntimeStub.
 514   address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
 515                                    Register arg1 = noreg, Register arg2 = noreg) {
 516     CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
 517     MacroAssembler* masm = new MacroAssembler(&code);
 518 
 519     OopMapSet* oop_maps  = new OopMapSet();
 520     int frame_size_in_bytes = frame::abi_reg_args_size;
 521     OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
 522 
 523     StubCodeMark mark(this, "StubRoutines", "throw_exception");
 524 
 525     address start = __ pc();
 526 
 527     __ save_LR_CR(R11_scratch1);
 528 
 529     // Push a frame.
 530     __ push_frame_reg_args(0, R11_scratch1);
 531 
 532     address frame_complete_pc = __ pc();
 533 
 534     if (restore_saved_exception_pc) {
 535       __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
 536     }
 537 
 538     // Note that we always have a runtime stub frame on the top of
 539     // stack by this point. Remember the offset of the instruction
 540     // whose address will be moved to R11_scratch1.
 541     address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
 542 
 543     __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
 544 
 545     __ mr(R3_ARG1, R16_thread);
 546     if (arg1 != noreg) {
 547       __ mr(R4_ARG2, arg1);
 548     }
 549     if (arg2 != noreg) {
 550       __ mr(R5_ARG3, arg2);
 551     }
 552 #if defined(ABI_ELFv2)
 553     __ call_c(runtime_entry, relocInfo::none);
 554 #else
 555     __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
 556 #endif
 557 
 558     // Set an oopmap for the call site.
 559     oop_maps->add_gc_map((int)(gc_map_pc - start), map);
 560 
 561     __ reset_last_Java_frame();
 562 
 563 #ifdef ASSERT
 564     // Make sure that this code is only executed if there is a pending
 565     // exception.
 566     {
 567       Label L;
 568       __ ld(R0,
 569                 in_bytes(Thread::pending_exception_offset()),
 570                 R16_thread);
 571       __ cmpdi(CCR0, R0, 0);
 572       __ bne(CCR0, L);
 573       __ stop("StubRoutines::throw_exception: no pending exception");
 574       __ bind(L);
 575     }
 576 #endif
 577 
 578     // Pop frame.
 579     __ pop_frame();
 580 
 581     __ restore_LR_CR(R11_scratch1);
 582 
 583     __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
 584     __ mtctr(R11_scratch1);
 585     __ bctr();
 586 
 587     // Create runtime stub with OopMap.
 588     RuntimeStub* stub =
 589       RuntimeStub::new_runtime_stub(name, &code,
 590                                     /*frame_complete=*/ (int)(frame_complete_pc - start),
 591                                     frame_size_in_bytes/wordSize,
 592                                     oop_maps,
 593                                     false);
 594     return stub->entry_point();
 595   }
 596 #undef __
 597 #define __ _masm->
 598 
 599   //  Generate G1 pre-write barrier for array.
 600   //
 601   //  Input:
 602   //     from     - register containing src address (only needed for spilling)
 603   //     to       - register containing starting address
 604   //     count    - register containing element count
 605   //     Rtmp1    - scratch register; if dest_uninitialized is true, no barrier code is emitted
 606   //
 607   //  Kills:
 608   //     nothing
 609   //
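  //  Conceptually this emits (illustrative sketch, not the literal call sequence):
  //
  //    if (!dest_uninitialized && SATB marking is active for this thread)
  //      BarrierSet::static_write_ref_array_pre(to, count);  // from/to/count are spilled around the call
  //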
 610   void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) {
 611     BarrierSet* const bs = Universe::heap()->barrier_set();
 612     switch (bs->kind()) {
 613       case BarrierSet::G1SATBCT:
 614       case BarrierSet::G1SATBCTLogging:
 615         // With G1, don't generate the call if we statically know that the target is uninitialized.
 616         if (!dest_uninitialized) {
 617           const int spill_slots = 4 * wordSize;
 618           const int frame_size  = frame::abi_reg_args_size + spill_slots;
 619           Label filtered;
 620 
 621           // Is marking active?
 622           if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
 623             __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
 624           } else {
 625             guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
 626             __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
 627           }
 628           __ cmpdi(CCR0, Rtmp1, 0);
 629           __ beq(CCR0, filtered);
 630 
 631           __ save_LR_CR(R0);
 632           __ push_frame_reg_args(spill_slots, R0);
 633           __ std(from,  frame_size - 1 * wordSize, R1_SP);
 634           __ std(to,    frame_size - 2 * wordSize, R1_SP);
 635           __ std(count, frame_size - 3 * wordSize, R1_SP);
 636 
 637           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
 638 
 639           __ ld(from,  frame_size - 1 * wordSize, R1_SP);
 640           __ ld(to,    frame_size - 2 * wordSize, R1_SP);
 641           __ ld(count, frame_size - 3 * wordSize, R1_SP);
 642           __ pop_frame();
 643           __ restore_LR_CR(R0);
 644 
 645           __ bind(filtered);
 646         }
 647         break;
 648       case BarrierSet::CardTableModRef:
 649       case BarrierSet::CardTableExtension:
 650       case BarrierSet::ModRef:
 651         break;
 652       default:
 653         ShouldNotReachHere();
 654     }
 655   }
 656 
 657   //  Generate CMS/G1 post-write barrier for array.
 658   //
 659   //  Input:
 660   //     addr     - register containing starting address
 661   //     count    - register containing element count
 662   //     tmp      - scratch register; if branchToEnd is false, the code ends with a blr (tail position)
 663   //
 664   //  The input registers and R0 are overwritten.
 665   //
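  //  For the card-table kinds handled below, the net effect is roughly
  //  (illustrative C sketch; byte_map_base/card_shift as used in the code):
  //
  //    uintptr_t first = (uintptr_t)addr >> card_shift;
  //    uintptr_t last  = ((uintptr_t)addr + (count - 1) * BytesPerHeapOop) >> card_shift;
  //    for (uintptr_t c = first; c <= last; c++)
  //      byte_map_base[c] = 0;   // 0 is the dirty value stored by the loop below
  //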
 666   void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) {
 667     BarrierSet* const bs = Universe::heap()->barrier_set();
 668 
 669     switch (bs->kind()) {
 670       case BarrierSet::G1SATBCT:
 671       case BarrierSet::G1SATBCTLogging:
 672         {
 673           if (branchToEnd) {
 674             __ save_LR_CR(R0);
 675             // We need this frame only to spill LR.
 676             __ push_frame_reg_args(0, R0);
 677             __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
 678             __ pop_frame();
 679             __ restore_LR_CR(R0);
 680           } else {
 681             // Tail call: fake call from stub caller by branching without linking.
 682             address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
 683             __ mr_if_needed(R3_ARG1, addr);
 684             __ mr_if_needed(R4_ARG2, count);
 685             __ load_const(R11, entry_point, R0);
 686             __ call_c_and_return_to_caller(R11);
 687           }
 688         }
 689         break;
 690       case BarrierSet::CardTableModRef:
 691       case BarrierSet::CardTableExtension:
 692         {
 693           Label Lskip_loop, Lstore_loop;
 694           if (UseConcMarkSweepGC) {
 695             // TODO PPC port: contribute optimization / requires shared changes
 696             __ release();
 697           }
 698 
 699           CardTableModRefBS* const ct = (CardTableModRefBS*)bs;
 700           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 701           assert_different_registers(addr, count, tmp);
 702 
 703           __ sldi(count, count, LogBytesPerHeapOop);
 704           __ addi(count, count, -BytesPerHeapOop);
 705           __ add(count, addr, count);
 706           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
 707           __ srdi(addr, addr, CardTableModRefBS::card_shift);
 708           __ srdi(count, count, CardTableModRefBS::card_shift);
 709           __ subf(count, addr, count);
 710           assert_different_registers(R0, addr, count, tmp);
 711           __ load_const(tmp, (address)ct->byte_map_base);
 712           __ addic_(count, count, 1);
 713           __ beq(CCR0, Lskip_loop);
 714           __ li(R0, 0);
 715           __ mtctr(count);
 716           // Byte store loop
 717           __ bind(Lstore_loop);
 718           __ stbx(R0, tmp, addr);
 719           __ addi(addr, addr, 1);
 720           __ bdnz(Lstore_loop);
 721           __ bind(Lskip_loop);
 722 
 723           if (!branchToEnd) __ blr();
 724         }
 725       break;
 726       case BarrierSet::ModRef:
 727         if (!branchToEnd) __ blr();
 728         break;
 729       default:
 730         ShouldNotReachHere();
 731     }
 732   }
 733 
 734   // Support for void zero_words_aligned8(HeapWord* to, size_t count)
 735   //
 736   // Arguments:
 737   //   to:    R3_ARG1, must be 8-byte aligned
 738   //   count: R4_ARG2, number of 8-byte heap words to clear
 739   //
 740   // Destroys: R3_ARG1-R7_ARG5 (used as temporaries), CTR, CCR0 and CCR1
 741   //
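  // Net effect (illustrative sketch; 'to' is 8-byte aligned, 'count' counts
  // 8-byte words):
  //
  //   memset(to, 0, count * BytesPerWord);
  //
  // Large, cache-line aligned middle portions are cleared with dcbz instead of
  // individual 8-byte stores.
  //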
 742   address generate_zero_words_aligned8() {
 743     StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
 744 
 745     // Implemented as in ClearArray.
 746     address start = __ function_entry();
 747 
 748     Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
 749     Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
 750     Register tmp1_reg       = R5_ARG3;
 751     Register tmp2_reg       = R6_ARG4;
 752     Register zero_reg       = R7_ARG5;
 753 
 754     // Procedure for large arrays (uses data cache block zero instruction).
 755     Label dwloop, fast, fastloop, restloop, lastdword, done;
 756     int cl_size=VM_Version::get_cache_line_size(), cl_dwords=cl_size>>3, cl_dwordaddr_bits=exact_log2(cl_dwords);
 757     int min_dcbz=2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
 758 
 759     // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
 760     __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
 761     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
 762     __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
 763     __ load_const_optimized(zero_reg, 0L);      // Use as zero register.
 764 
 765     __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
 766     __ beq(CCR0, lastdword);                    // size <= 1
 767     __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
 768     __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
 769     __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000
 770 
 771     __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
 772     __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.
 773 
 774     __ beq(CCR0, fast);                         // already 128byte aligned
 775     __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
 776     __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)
 777 
 778     // Clear in first cache line dword-by-dword if not already 128byte aligned.
 779     __ bind(dwloop);
 780       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
 781       __ addi(base_ptr_reg, base_ptr_reg, 8);
 782     __ bdnz(dwloop);
 783 
 784     // clear 128byte blocks
 785     __ bind(fast);
 786     __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
 787     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even
 788 
 789     __ mtctr(tmp1_reg);                         // load counter
 790     __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
 791     __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords
 792 
 793     __ bind(fastloop);
 794       __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
 795       __ addi(base_ptr_reg, base_ptr_reg, cl_size);
 796     __ bdnz(fastloop);
 797 
 798     //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
 799     __ beq(CCR0, lastdword);                    // rest<=1
 800     __ mtctr(tmp1_reg);                         // load counter
 801 
 802     // Clear rest.
 803     __ bind(restloop);
 804       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
 805       __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
 806       __ addi(base_ptr_reg, base_ptr_reg, 16);
 807     __ bdnz(restloop);
 808 
 809     __ bind(lastdword);
 810     __ beq(CCR1, done);
 811     __ std(zero_reg, 0, base_ptr_reg);
 812     __ bind(done);
 813     __ blr();                                   // return
 814 
 815     return start;
 816   }
 817 
 818   // The following routine generates a subroutine to throw an asynchronous
 819   // UnknownError when an unsafe access gets a fault that could not be
 820   // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
 821   //
 822   address generate_handler_for_unsafe_access() {
 823     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
 824     address start = __ function_entry();
 825     __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
 826     return start;
 827   }
 828 
 829 #if !defined(PRODUCT)
 830   // Wrapper which calls oopDesc::is_oop_or_null()
 831   // Only called by MacroAssembler::verify_oop
 832   static void verify_oop_helper(const char* message, oop o) {
 833     if (!o->is_oop_or_null()) {
 834       fatal(message);
 835     }
 836     ++ StubRoutines::_verify_oop_count;
 837   }
 838 #endif
 839 
 840   // Return address of code to be called from code generated by
 841   // MacroAssembler::verify_oop.
 842   //
 843   // Don't generate, rather use C++ code.
 844   address generate_verify_oop() {
 845     StubCodeMark mark(this, "StubRoutines", "verify_oop");
 846 
 847     // this is actually a `FunctionDescriptor*'.
 848     address start = 0;
 849 
 850 #if !defined(PRODUCT)
 851     start = CAST_FROM_FN_PTR(address, verify_oop_helper);
 852 #endif
 853 
 854     return start;
 855   }
 856 
 857   // Fairer handling of safepoints for native methods.
 858   //
 859   // Generate code which reads from the polling page. This special handling is needed as the
 860   // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
 861   // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
 862   // to read from the safepoint polling page.
 863   address generate_load_from_poll() {
 864     StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
 865     address start = __ function_entry();
 866     __ unimplemented("StubRoutines::load_from_poll", 95);  // TODO PPC port
 867     return start;
 868   }
 869 
 870   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
 871   //
 872   // The code is implemented (ported from SPARC) because we believe it benefits JVM98; however,
 873   // tracing (-XX:+TraceOptimizeFill) shows that the intrinsic replacement doesn't happen at all!
 874   //
 875   // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
 876   // for turning on loop predication optimization, and hence the behavior of "array range check"
 877   // and "loop invariant check" could be influenced, which potentially boosted JVM98.
 878   //
 879   // Generate stub for array fill (handles T_BYTE, T_SHORT and T_INT). If "aligned" is true, the
 880   // "to" address is assumed to be heapword aligned.
 881   //
 882   // Arguments for generated stub:
 883   //   to:    R3_ARG1
 884   //   value: R4_ARG2
 885   //   count: R5_ARG3 treated as signed
 886   //
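  // Net effect for t == T_INT (illustrative sketch; the byte/short variants
  // first replicate 'value' up to 32 bits):
  //
  //   for (int i = 0; i < count; i++) ((jint*)to)[i] = (jint)value;
  //
  // The generated code widens 'value' to a 64-bit pattern and fills 32 bytes
  // per loop iteration where the remaining length allows it.
  //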
 887   address generate_fill(BasicType t, bool aligned, const char* name) {
 888     StubCodeMark mark(this, "StubRoutines", name);
 889     address start = __ function_entry();
 890 
 891     const Register to    = R3_ARG1;   // source array address
 892     const Register value = R4_ARG2;   // fill value
 893     const Register count = R5_ARG3;   // elements count
 894     const Register temp  = R6_ARG4;   // temp register
 895 
 896     //assert_clean_int(count, O3);    // Make sure 'count' is clean int.
 897 
 898     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
 899     Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;
 900 
 901     int shift = -1;
 902     switch (t) {
 903        case T_BYTE:
 904         shift = 2;
 905         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
 906         __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
 907         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 908         __ blt(CCR0, L_fill_elements);
 909         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
 910         break;
 911        case T_SHORT:
 912         shift = 1;
 913         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
 914         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
 915         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 916         __ blt(CCR0, L_fill_elements);
 917         break;
 918       case T_INT:
 919         shift = 0;
 920         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 921         __ blt(CCR0, L_fill_4_bytes);
 922         break;
 923       default: ShouldNotReachHere();
 924     }
 925 
 926     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
 927       // Align source address at 4 bytes address boundary.
 928       if (t == T_BYTE) {
 929         // One byte misalignment happens only for byte arrays.
 930         __ andi_(temp, to, 1);
 931         __ beq(CCR0, L_skip_align1);
 932         __ stb(value, 0, to);
 933         __ addi(to, to, 1);
 934         __ addi(count, count, -1);
 935         __ bind(L_skip_align1);
 936       }
 937       // Two bytes misalignment happens only for byte and short (char) arrays.
 938       __ andi_(temp, to, 2);
 939       __ beq(CCR0, L_skip_align2);
 940       __ sth(value, 0, to);
 941       __ addi(to, to, 2);
 942       __ addi(count, count, -(1 << (shift - 1)));
 943       __ bind(L_skip_align2);
 944     }
 945 
 946     if (!aligned) {
 947       // Align to 8 bytes, we know we are 4 byte aligned to start.
 948       __ andi_(temp, to, 7);
 949       __ beq(CCR0, L_fill_32_bytes);
 950       __ stw(value, 0, to);
 951       __ addi(to, to, 4);
 952       __ addi(count, count, -(1 << shift));
 953       __ bind(L_fill_32_bytes);
 954     }
 955 
 956     __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
 957     // Clone bytes int->long as above.
 958     __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit
 959 
 960     Label L_check_fill_8_bytes;
 961     // Fill 32-byte chunks.
 962     __ subf_(count, temp, count);
 963     __ blt(CCR0, L_check_fill_8_bytes);
 964 
 965     Label L_fill_32_bytes_loop;
 966     __ align(32);
 967     __ bind(L_fill_32_bytes_loop);
 968 
 969     __ std(value, 0, to);
 970     __ std(value, 8, to);
 971     __ subf_(count, temp, count);           // Update count.
 972     __ std(value, 16, to);
 973     __ std(value, 24, to);
 974 
 975     __ addi(to, to, 32);
 976     __ bge(CCR0, L_fill_32_bytes_loop);
 977 
 978     __ bind(L_check_fill_8_bytes);
 979     __ add_(count, temp, count);
 980     __ beq(CCR0, L_exit);
 981     __ addic_(count, count, -(2 << shift));
 982     __ blt(CCR0, L_fill_4_bytes);
 983 
 984     //
 985     // Length is too short, just fill 8 bytes at a time.
 986     //
 987     Label L_fill_8_bytes_loop;
 988     __ bind(L_fill_8_bytes_loop);
 989     __ std(value, 0, to);
 990     __ addic_(count, count, -(2 << shift));
 991     __ addi(to, to, 8);
 992     __ bge(CCR0, L_fill_8_bytes_loop);
 993 
 994     // Fill trailing 4 bytes.
 995     __ bind(L_fill_4_bytes);
 996     __ andi_(temp, count, 1<<shift);
 997     __ beq(CCR0, L_fill_2_bytes);
 998 
 999     __ stw(value, 0, to);
1000     if (t == T_BYTE || t == T_SHORT) {
1001       __ addi(to, to, 4);
1002       // Fill trailing 2 bytes.
1003       __ bind(L_fill_2_bytes);
1004       __ andi_(temp, count, 1<<(shift-1));
1005       __ beq(CCR0, L_fill_byte);
1006       __ sth(value, 0, to);
1007       if (t == T_BYTE) {
1008         __ addi(to, to, 2);
1009         // Fill trailing byte.
1010         __ bind(L_fill_byte);
1011         __ andi_(count, count, 1);
1012         __ beq(CCR0, L_exit);
1013         __ stb(value, 0, to);
1014       } else {
1015         __ bind(L_fill_byte);
1016       }
1017     } else {
1018       __ bind(L_fill_2_bytes);
1019     }
1020     __ bind(L_exit);
1021     __ blr();
1022 
1023     // Handle copies less than 8 bytes. Int is handled elsewhere.
1024     if (t == T_BYTE) {
1025       __ bind(L_fill_elements);
1026       Label L_fill_2, L_fill_4;
1027       __ andi_(temp, count, 1);
1028       __ beq(CCR0, L_fill_2);
1029       __ stb(value, 0, to);
1030       __ addi(to, to, 1);
1031       __ bind(L_fill_2);
1032       __ andi_(temp, count, 2);
1033       __ beq(CCR0, L_fill_4);
1034       __ stb(value, 0, to);
1035       __ stb(value, 1, to);
1036       __ addi(to, to, 2);
1037       __ bind(L_fill_4);
1038       __ andi_(temp, count, 4);
1039       __ beq(CCR0, L_exit);
1040       __ stb(value, 0, to);
1041       __ stb(value, 1, to);
1042       __ stb(value, 2, to);
1043       __ stb(value, 3, to);
1044       __ blr();
1045     }
1046 
1047     if (t == T_SHORT) {
1048       Label L_fill_2;
1049       __ bind(L_fill_elements);
1050       __ andi_(temp, count, 1);
1051       __ beq(CCR0, L_fill_2);
1052       __ sth(value, 0, to);
1053       __ addi(to, to, 2);
1054       __ bind(L_fill_2);
1055       __ andi_(temp, count, 2);
1056       __ beq(CCR0, L_exit);
1057       __ sth(value, 0, to);
1058       __ sth(value, 2, to);
1059       __ blr();
1060     }
1061     return start;
1062   }
1063 
1064 
1065   // Generate overlap test for array copy stubs.
1066   //
1067   // Input:
1068   //   R3_ARG1    -  from
1069   //   R4_ARG2    -  to
1070   //   R5_ARG3    -  element count
1071   //
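  // The decision made below is equivalent to (illustrative C):
  //
  //   if (from < to && (size_t)(to - from) < ((size_t)count << log2_elem_size))
  //     goto l_overlap;          // destination overlaps source, copy backwards
  //   else
  //     goto no_overlap_target;  // safe to copy forwards
  //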
1072   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
1073     Register tmp1 = R6_ARG4;
1074     Register tmp2 = R7_ARG5;
1075 
1076     Label l_overlap;
1077 #ifdef ASSERT
1078     __ srdi_(tmp2, R5_ARG3, 31);
1079     __ asm_assert_eq("missing zero extend", 0xAFFE);
1080 #endif
1081 
1082     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
1083     __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
1084     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
1085     __ cmpld(CCR1, tmp1, tmp2);
1086     __ crand(/*CCR0 lt*/0, /*CCR1 lt*/4+0, /*CCR0 lt*/0);
1087     __ blt(CCR0, l_overlap); // Src before dst and distance smaller than size.
1088 
1089     // need to copy forwards
1090     if (__ is_within_range_of_b(no_overlap_target, __ pc())) {
1091       __ b(no_overlap_target);
1092     } else {
1093       __ load_const(tmp1, no_overlap_target, tmp2);
1094       __ mtctr(tmp1);
1095       __ bctr();
1096     }
1097 
1098     __ bind(l_overlap);
1099     // need to copy backwards
1100   }
1101 
1102   // The guideline in the implementations of generate_disjoint_xxx_copy
1103   // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
1104   // single instructions, but to avoid alignment interrupts (see subsequent
1105   // comment). Furthermore, we try to minimize misaligned accesses, even
1106   // though they cause no alignment interrupt.
1107   //
1108   // In Big-Endian mode, the PowerPC architecture requires implementations to
1109   // handle automatically misaligned integer halfword and word accesses,
1110   // word-aligned integer doubleword accesses, and word-aligned floating-point
1111   // accesses. Other accesses may or may not generate an Alignment interrupt
1112   // depending on the implementation.
1113   // Alignment interrupt handling may require on the order of hundreds of cycles,
1114   // so every effort should be made to avoid misaligned memory values.
1115   //
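  // The inner loop this guideline aims for looks roughly like (illustrative C
  // sketch, assuming both pointers have been brought to 8-byte alignment):
  //
  //   while (n >= 8) { *(uint64_t*)to = *(uint64_t*)from; to += 8; from += 8; n -= 8; }
  //
  // with word, halfword and byte copies reserved for the unaligned head and tail.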
1116   //
1117   // Generate stub for disjoint byte copy.  If "aligned" is true, the
1118   // "from" and "to" addresses are assumed to be heapword aligned.
1119   //
1120   // Arguments for generated stub:
1121   //      from:  R3_ARG1
1122   //      to:    R4_ARG2
1123   //      count: R5_ARG3 treated as signed
1124   //
1125   address generate_disjoint_byte_copy(bool aligned, const char * name) {
1126     StubCodeMark mark(this, "StubRoutines", name);
1127     address start = __ function_entry();
1128 
1129     Register tmp1 = R6_ARG4;
1130     Register tmp2 = R7_ARG5;
1131     Register tmp3 = R8_ARG6;
1132     Register tmp4 = R9_ARG7;
1133 
1134 
1135     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
1136     // Don't try anything fancy if arrays don't have many elements.
1137     __ li(tmp3, 0);
1138     __ cmpwi(CCR0, R5_ARG3, 17);
1139     __ ble(CCR0, l_6); // copy 4 at a time
1140 
1141     if (!aligned) {
1142       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1143       __ andi_(tmp1, tmp1, 3);
1144       __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
1145 
1146       // Copy elements if necessary to align to 4 bytes.
1147       __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
1148       __ andi_(tmp1, tmp1, 3);
1149       __ beq(CCR0, l_2);
1150 
1151       __ subf(R5_ARG3, tmp1, R5_ARG3);
1152       __ bind(l_9);
1153       __ lbz(tmp2, 0, R3_ARG1);
1154       __ addic_(tmp1, tmp1, -1);
1155       __ stb(tmp2, 0, R4_ARG2);
1156       __ addi(R3_ARG1, R3_ARG1, 1);
1157       __ addi(R4_ARG2, R4_ARG2, 1);
1158       __ bne(CCR0, l_9);
1159 
1160       __ bind(l_2);
1161     }
1162 
1163     // copy 8 elements at a time
1164     __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
1165     __ andi_(tmp1, tmp2, 7);
1166     __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8
1167 
1168     // copy a 4-element word if necessary to align to 8 bytes
1169     __ andi_(R0, R3_ARG1, 7);
1170     __ beq(CCR0, l_7);
1171 
1172     __ lwzx(tmp2, R3_ARG1, tmp3);
1173     __ addi(R5_ARG3, R5_ARG3, -4);
1174     __ stwx(tmp2, R4_ARG2, tmp3);
1175     { // FasterArrayCopy
1176       __ addi(R3_ARG1, R3_ARG1, 4);
1177       __ addi(R4_ARG2, R4_ARG2, 4);
1178     }
1179     __ bind(l_7);
1180 
1181     { // FasterArrayCopy
1182       __ cmpwi(CCR0, R5_ARG3, 31);
1183       __ ble(CCR0, l_6); // copy 4 at a time if less than 32 elements remain
1184 
1185       __ srdi(tmp1, R5_ARG3, 5);
1186       __ andi_(R5_ARG3, R5_ARG3, 31);
1187       __ mtctr(tmp1);
1188 
1189       __ bind(l_8);
1190       // Use unrolled version for mass copying (copy 32 elements at a time)
1191       // Load feeding store gets zero latency on Power6, however not on Power5.
1192       // Therefore, the following sequence is made for the good of both.
1193       __ ld(tmp1, 0, R3_ARG1);
1194       __ ld(tmp2, 8, R3_ARG1);
1195       __ ld(tmp3, 16, R3_ARG1);
1196       __ ld(tmp4, 24, R3_ARG1);
1197       __ std(tmp1, 0, R4_ARG2);
1198       __ std(tmp2, 8, R4_ARG2);
1199       __ std(tmp3, 16, R4_ARG2);
1200       __ std(tmp4, 24, R4_ARG2);
1201       __ addi(R3_ARG1, R3_ARG1, 32);
1202       __ addi(R4_ARG2, R4_ARG2, 32);
1203       __ bdnz(l_8);
1204     }
1205 
1206     __ bind(l_6);
1207 
1208     // copy 4 elements at a time
1209     __ cmpwi(CCR0, R5_ARG3, 4);
1210     __ blt(CCR0, l_1);
1211     __ srdi(tmp1, R5_ARG3, 2);
1212     __ mtctr(tmp1); // is > 0
1213     __ andi_(R5_ARG3, R5_ARG3, 3);
1214 
1215     { // FasterArrayCopy
1216       __ addi(R3_ARG1, R3_ARG1, -4);
1217       __ addi(R4_ARG2, R4_ARG2, -4);
1218       __ bind(l_3);
1219       __ lwzu(tmp2, 4, R3_ARG1);
1220       __ stwu(tmp2, 4, R4_ARG2);
1221       __ bdnz(l_3);
1222       __ addi(R3_ARG1, R3_ARG1, 4);
1223       __ addi(R4_ARG2, R4_ARG2, 4);
1224     }
1225 
1226     // do single element copy
1227     __ bind(l_1);
1228     __ cmpwi(CCR0, R5_ARG3, 0);
1229     __ beq(CCR0, l_4);
1230 
1231     { // FasterArrayCopy
1232       __ mtctr(R5_ARG3);
1233       __ addi(R3_ARG1, R3_ARG1, -1);
1234       __ addi(R4_ARG2, R4_ARG2, -1);
1235 
1236       __ bind(l_5);
1237       __ lbzu(tmp2, 1, R3_ARG1);
1238       __ stbu(tmp2, 1, R4_ARG2);
1239       __ bdnz(l_5);
1240     }
1241 
1242     __ bind(l_4);
1243     __ blr();
1244 
1245     return start;
1246   }
1247 
1248   // Generate stub for conjoint byte copy.  If "aligned" is true, the
1249   // "from" and "to" addresses are assumed to be heapword aligned.
1250   //
1251   // Arguments for generated stub:
1252   //      from:  R3_ARG1
1253   //      to:    R4_ARG2
1254   //      count: R5_ARG3 treated as signed
1255   //
1256   address generate_conjoint_byte_copy(bool aligned, const char * name) {
1257     StubCodeMark mark(this, "StubRoutines", name);
1258     address start = __ function_entry();
1259 
1260     Register tmp1 = R6_ARG4;
1261     Register tmp2 = R7_ARG5;
1262     Register tmp3 = R8_ARG6;
1263 
1264 #if defined(ABI_ELFv2)
1265      address nooverlap_target = aligned ?
1266        StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
1267        StubRoutines::jbyte_disjoint_arraycopy();
1268 #else
1269     address nooverlap_target = aligned ?
1270       ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
1271       ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
1272 #endif
1273 
1274     array_overlap_test(nooverlap_target, 0);
1275     // Do reverse copy. We assume the case of actual overlap is rare enough
1276     // that we don't have to optimize it.
1277     Label l_1, l_2;
1278 
1279     __ b(l_2);
1280     __ bind(l_1);
1281     __ stbx(tmp1, R4_ARG2, R5_ARG3);
1282     __ bind(l_2);
1283     __ addic_(R5_ARG3, R5_ARG3, -1);
1284     __ lbzx(tmp1, R3_ARG1, R5_ARG3);
1285     __ bge(CCR0, l_1);
1286 
1287     __ blr();
1288 
1289     return start;
1290   }
1291 
1292   // Generate stub for disjoint short copy.  If "aligned" is true, the
1293   // "from" and "to" addresses are assumed to be heapword aligned.
1294   //
1295   // Arguments for generated stub:
1296   //      from:  R3_ARG1
1297   //      to:    R4_ARG2
1298   //  elm.count: R5_ARG3 treated as signed
1299   //
1300   // Strategy for aligned==true:
1301   //
1302   //  If length <= 9:
1303   //     1. copy 2 elements at a time (l_6)
1304   //     2. copy last element if original element count was odd (l_1)
1305   //
1306   //  If length > 9:
1307   //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
1308   //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
1309   //     3. copy last element if one was left in step 2. (l_1)
1310   //
1311   //
1312   // Strategy for aligned==false:
1313   //
1314   //  If length <= 9: same as aligned==true case, but NOTE: load/stores
1315   //                  can be unaligned (see comment below)
1316   //
1317   //  If length > 9:
1318   //     1. continue with step 6. if the alignment of from and to mod 4
1319   //        is different.
1320   //     2. align from and to to 4 bytes by copying 1 element if necessary
1321   //     3. at l_2 from and to are 4 byte aligned; continue with
1322   //        5. if they cannot be aligned to 8 bytes because they have
1323   //        got different alignment mod 8.
1324   //     4. at this point we know that both, from and to, have the same
1325   //        alignment mod 8, now copy one element if necessary to get
1326   //        8 byte alignment of from and to.
1327   //     5. copy 4 elements at a time until less than 4 elements are
1328   //        left; depending on step 3. all load/stores are aligned or
1329   //        either all loads or all stores are unaligned.
1330   //     6. copy 2 elements at a time until less than 2 elements are
1331   //        left (l_6); arriving here from step 1., there is a chance
1332   //        that all accesses are unaligned.
1333   //     7. copy last element if one was left in step 6. (l_1)
1334   //
1335   //  There are unaligned data accesses using integer load/store
1336   //  instructions in this stub. POWER allows such accesses.
1337   //
1338   //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
1339   //  Chapter 2: Effect of Operand Placement on Performance) unaligned
1340   //  integer load/stores have good performance. Only unaligned
1341   //  floating point load/stores can have poor performance.
1342   //
1343   //  TODO:
1344   //
1345   //  1. check if aligning the backbranch target of loops is beneficial
1346   //
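  //  Illustrative C sketch of the aligned==true strategy above (n = element
  //  count, to/from are byte pointers):
  //
  //    while (n >= 4) { *(uint64_t*)to = *(uint64_t*)from; to += 8; from += 8; n -= 4; }
  //    while (n >= 2) { *(uint32_t*)to = *(uint32_t*)from; to += 4; from += 4; n -= 2; }
  //    if (n)         { *(uint16_t*)to = *(uint16_t*)from; }
  //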
1347   address generate_disjoint_short_copy(bool aligned, const char * name) {
1348     StubCodeMark mark(this, "StubRoutines", name);
1349 
1350     Register tmp1 = R6_ARG4;
1351     Register tmp2 = R7_ARG5;
1352     Register tmp3 = R8_ARG6;
1353     Register tmp4 = R9_ARG7;
1354 
1355     VectorSRegister tmp_vsr1  = VSR1;
1356     VectorSRegister tmp_vsr2  = VSR2;
1357 
1358     address start = __ function_entry();
1359 
1360     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
1361 
1362     // don't try anything fancy if arrays don't have many elements
1363     __ li(tmp3, 0);
1364     __ cmpwi(CCR0, R5_ARG3, 9);
1365     __ ble(CCR0, l_6); // copy 2 at a time
1366 
1367     if (!aligned) {
1368       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1369       __ andi_(tmp1, tmp1, 3);
1370       __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
1371 
1372       // At this point it is guaranteed that both, from and to have the same alignment mod 4.
1373 
1374       // Copy 1 element if necessary to align to 4 bytes.
1375       __ andi_(tmp1, R3_ARG1, 3);
1376       __ beq(CCR0, l_2);
1377 
1378       __ lhz(tmp2, 0, R3_ARG1);
1379       __ addi(R3_ARG1, R3_ARG1, 2);
1380       __ sth(tmp2, 0, R4_ARG2);
1381       __ addi(R4_ARG2, R4_ARG2, 2);
1382       __ addi(R5_ARG3, R5_ARG3, -1);
1383       __ bind(l_2);
1384 
1385       // At this point the positions of both, from and to, are at least 4 byte aligned.
1386 
1387       // Copy 4 elements at a time.
1388       // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
1389       __ xorr(tmp2, R3_ARG1, R4_ARG2);
1390       __ andi_(tmp1, tmp2, 7);
1391       __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
1392 
1393       // Copy a 2-element word if necessary to align to 8 bytes.
1394       __ andi_(R0, R3_ARG1, 7);
1395       __ beq(CCR0, l_7);
1396 
1397       __ lwzx(tmp2, R3_ARG1, tmp3);
1398       __ addi(R5_ARG3, R5_ARG3, -2);
1399       __ stwx(tmp2, R4_ARG2, tmp3);
1400       { // FasterArrayCopy
1401         __ addi(R3_ARG1, R3_ARG1, 4);
1402         __ addi(R4_ARG2, R4_ARG2, 4);
1403       }
1404     }
1405 
1406     __ bind(l_7);
1407 
1408     // Copy 4 elements at a time; either the loads or the stores can
1409     // be unaligned if aligned == false.
1410 
1411     { // FasterArrayCopy
1412       __ cmpwi(CCR0, R5_ARG3, 15);
1413       __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
1414 
1415       __ srdi(tmp1, R5_ARG3, 4);
1416       __ andi_(R5_ARG3, R5_ARG3, 15);
1417       __ mtctr(tmp1);
1418 
1419       if (!VM_Version::has_vsx()) {
1420 
1421         __ bind(l_8);
1422         // Use unrolled version for mass copying (copy 16 elements at a time).
1423         // Load feeding store gets zero latency on Power6, however not on Power5.
1424         // Therefore, the following sequence is made for the good of both.
1425         __ ld(tmp1, 0, R3_ARG1);
1426         __ ld(tmp2, 8, R3_ARG1);
1427         __ ld(tmp3, 16, R3_ARG1);
1428         __ ld(tmp4, 24, R3_ARG1);
1429         __ std(tmp1, 0, R4_ARG2);
1430         __ std(tmp2, 8, R4_ARG2);
1431         __ std(tmp3, 16, R4_ARG2);
1432         __ std(tmp4, 24, R4_ARG2);
1433         __ addi(R3_ARG1, R3_ARG1, 32);
1434         __ addi(R4_ARG2, R4_ARG2, 32);
1435         __ bdnz(l_8);
1436 
1437       } else { // Processor supports VSX, so use it to mass copy.
1438 
1439         // Prefetch src data into L2 cache.
1440         __ dcbt(R3_ARG1, 0);
1441 
1442         // If supported set DSCR pre-fetch to deepest.
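        // (The low-order DSCR bits select the default prefetch depth;
        // 7 requests the deepest prefetch.)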
1443         if (VM_Version::has_mfdscr()) {
1444           __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1445           __ mtdscr(tmp2);
1446         }
1447         __ li(tmp1, 16);
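        // tmp1 = 16 is the fixed byte offset used for the second
        // lxvd2x/stxvd2x pair in the loop below.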
1448 
        // Align the backbranch target to 32 bytes. 16-byte alignment is not
        // sufficient: the loop contains fewer than 8 instructions (< 32 bytes),
        // so 32-byte alignment keeps it inside a single i-cache sector.
1452         __ align(32);
1453 
1454         __ bind(l_9);
        // Use a loop with VSX load/store instructions to
        // copy 16 elements at a time.
1457         __ lxvd2x(tmp_vsr1, 0, R3_ARG1);     // Load from src.
1458         __ stxvd2x(tmp_vsr1, 0, R4_ARG2);    // Store to dst.
1459         __ lxvd2x(tmp_vsr2, R3_ARG1, tmp1);  // Load from src + 16.
1460         __ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
1461         __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32.
        __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32.
1463         __ bdnz(l_9);                        // Dec CTR and loop if not zero.
1464 
1465         // Restore DSCR pre-fetch value.
1466         if (VM_Version::has_mfdscr()) {
1467           __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1468           __ mtdscr(tmp2);
1469         }
1470 
1471       }
1472     } // FasterArrayCopy
1473     __ bind(l_6);
1474 
1475     // copy 2 elements at a time
1476     { // FasterArrayCopy
1477       __ cmpwi(CCR0, R5_ARG3, 2);
1478       __ blt(CCR0, l_1);
1479       __ srdi(tmp1, R5_ARG3, 1);
1480       __ andi_(R5_ARG3, R5_ARG3, 1);
1481 
1482       __ addi(R3_ARG1, R3_ARG1, -4);
1483       __ addi(R4_ARG2, R4_ARG2, -4);
1484       __ mtctr(tmp1);
1485 
1486       __ bind(l_3);
1487       __ lwzu(tmp2, 4, R3_ARG1);
1488       __ stwu(tmp2, 4, R4_ARG2);
1489       __ bdnz(l_3);
1490 
1491       __ addi(R3_ARG1, R3_ARG1, 4);
1492       __ addi(R4_ARG2, R4_ARG2, 4);
1493     }
1494 
1495     // do single element copy
1496     __ bind(l_1);
1497     __ cmpwi(CCR0, R5_ARG3, 0);
1498     __ beq(CCR0, l_4);
1499 
1500     { // FasterArrayCopy
1501       __ mtctr(R5_ARG3);
1502       __ addi(R3_ARG1, R3_ARG1, -2);
1503       __ addi(R4_ARG2, R4_ARG2, -2);
1504 
1505       __ bind(l_5);
1506       __ lhzu(tmp2, 2, R3_ARG1);
1507       __ sthu(tmp2, 2, R4_ARG2);
1508       __ bdnz(l_5);
1509     }
1510     __ bind(l_4);
1511     __ blr();
1512 
1513     return start;
1514   }
1515 
1516   // Generate stub for conjoint short copy.  If "aligned" is true, the
1517   // "from" and "to" addresses are assumed to be heapword aligned.
1518   //
1519   // Arguments for generated stub:
1520   //      from:  R3_ARG1
1521   //      to:    R4_ARG2
1522   //      count: R5_ARG3 treated as signed
1523   //
1524   address generate_conjoint_short_copy(bool aligned, const char * name) {
1525     StubCodeMark mark(this, "StubRoutines", name);
1526     address start = __ function_entry();
1527 
1528     Register tmp1 = R6_ARG4;
1529     Register tmp2 = R7_ARG5;
1530     Register tmp3 = R8_ARG6;
1531 
1532 #if defined(ABI_ELFv2)
1533     address nooverlap_target = aligned ?
1534         StubRoutines::arrayof_jshort_disjoint_arraycopy() :
1535         StubRoutines::jshort_disjoint_arraycopy();
1536 #else
1537     address nooverlap_target = aligned ?
1538         ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
1539         ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
1540 #endif
1541 
1542     array_overlap_test(nooverlap_target, 1);
1543 
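    // Copy backwards, one 2-byte element at a time; tmp1 is the byte offset
    // of the element currently being copied, counting down from (count-1)*2.
    // Roughly (an illustrative sketch, not the generated code):
    //   for (long i = count - 1; i >= 0; i--) { to[i] = from[i]; }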
1544     Label l_1, l_2;
1545     __ sldi(tmp1, R5_ARG3, 1);
1546     __ b(l_2);
1547     __ bind(l_1);
1548     __ sthx(tmp2, R4_ARG2, tmp1);
1549     __ bind(l_2);
1550     __ addic_(tmp1, tmp1, -2);
1551     __ lhzx(tmp2, R3_ARG1, tmp1);
1552     __ bge(CCR0, l_1);
1553 
1554     __ blr();
1555 
1556     return start;
1557   }
1558 
1559   // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
1560   // is true, the "from" and "to" addresses are assumed to be heapword aligned.
1561   //
1562   // Arguments:
1563   //      from:  R3_ARG1
1564   //      to:    R4_ARG2
1565   //      count: R5_ARG3 treated as signed
1566   //
1567   void generate_disjoint_int_copy_core(bool aligned) {
1568     Register tmp1 = R6_ARG4;
1569     Register tmp2 = R7_ARG5;
1570     Register tmp3 = R8_ARG6;
1571     Register tmp4 = R0;
1572 
1573     Label l_1, l_2, l_3, l_4, l_5, l_6;
1574     // for short arrays, just do single element copy
1575     __ li(tmp3, 0);
1576     __ cmpwi(CCR0, R5_ARG3, 5);
1577     __ ble(CCR0, l_2);
1578 
    if (!aligned) {
      // Check if arrays have the same alignment mod 8.
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(R0, tmp1, 7);
      // If the alignments differ mod 8, skip the alignment step below:
      // 4-byte aligned ld/std accesses still perform well on this platform.
      __ bne(CCR0, l_4);

      // Copy 1 element to align to and from on an 8 byte boundary.
      __ andi_(R0, R3_ARG1, 7);
      __ beq(CCR0, l_4);

      __ lwzx(tmp2, R3_ARG1, tmp3);
      __ addi(R5_ARG3, R5_ARG3, -1);
      __ stwx(tmp2, R4_ARG2, tmp3);
      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }
      __ bind(l_4);
    }
1599 
1600     { // FasterArrayCopy
1601       __ cmpwi(CCR0, R5_ARG3, 7);
1602       __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1603 
1604       __ srdi(tmp1, R5_ARG3, 3);
1605       __ andi_(R5_ARG3, R5_ARG3, 7);
1606       __ mtctr(tmp1);
1607 
1608       __ bind(l_6);
      // Use unrolled version for mass copying (copy 8 elements at a time).
      // Load feeding store gets zero latency on Power6, but not on Power5.
      // Therefore, the following sequence works well on both.
1612       __ ld(tmp1, 0, R3_ARG1);
1613       __ ld(tmp2, 8, R3_ARG1);
1614       __ ld(tmp3, 16, R3_ARG1);
1615       __ ld(tmp4, 24, R3_ARG1);
1616       __ std(tmp1, 0, R4_ARG2);
1617       __ std(tmp2, 8, R4_ARG2);
1618       __ std(tmp3, 16, R4_ARG2);
1619       __ std(tmp4, 24, R4_ARG2);
1620       __ addi(R3_ARG1, R3_ARG1, 32);
1621       __ addi(R4_ARG2, R4_ARG2, 32);
1622       __ bdnz(l_6);
1623     }
1624 
1625     // copy 1 element at a time
1626     __ bind(l_2);
1627     __ cmpwi(CCR0, R5_ARG3, 0);
1628     __ beq(CCR0, l_1);
1629 
1630     { // FasterArrayCopy
1631       __ mtctr(R5_ARG3);
1632       __ addi(R3_ARG1, R3_ARG1, -4);
1633       __ addi(R4_ARG2, R4_ARG2, -4);
1634 
1635       __ bind(l_3);
1636       __ lwzu(tmp2, 4, R3_ARG1);
1637       __ stwu(tmp2, 4, R4_ARG2);
1638       __ bdnz(l_3);
1639     }
1640 
1641     __ bind(l_1);
1642     return;
1643   }
1644 
1645   // Generate stub for disjoint int copy.  If "aligned" is true, the
1646   // "from" and "to" addresses are assumed to be heapword aligned.
1647   //
1648   // Arguments for generated stub:
1649   //      from:  R3_ARG1
1650   //      to:    R4_ARG2
1651   //      count: R5_ARG3 treated as signed
1652   //
1653   address generate_disjoint_int_copy(bool aligned, const char * name) {
1654     StubCodeMark mark(this, "StubRoutines", name);
1655     address start = __ function_entry();
1656     generate_disjoint_int_copy_core(aligned);
1657     __ blr();
1658     return start;
1659   }
1660 
1661   // Generate core code for conjoint int copy (and oop copy on
1662   // 32-bit).  If "aligned" is true, the "from" and "to" addresses
1663   // are assumed to be heapword aligned.
1664   //
1665   // Arguments:
1666   //      from:  R3_ARG1
1667   //      to:    R4_ARG2
1668   //      count: R5_ARG3 treated as signed
1669   //
1670   void generate_conjoint_int_copy_core(bool aligned) {
1671     // Do reverse copy.  We assume the case of actual overlap is rare enough
1672     // that we don't have to optimize it.
1673 
1674     Label l_1, l_2, l_3, l_4, l_5, l_6;
1675 
1676     Register tmp1 = R6_ARG4;
1677     Register tmp2 = R7_ARG5;
1678     Register tmp3 = R8_ARG6;
1679     Register tmp4 = R0;
1680 
1681     { // FasterArrayCopy
1682       __ cmpwi(CCR0, R5_ARG3, 0);
1683       __ beq(CCR0, l_6);
1684 
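      // Point from/to one past the end of the region: scale the count to
      // bytes, add it to both pointers, then shift back to the element count.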
1685       __ sldi(R5_ARG3, R5_ARG3, 2);
1686       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1687       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1688       __ srdi(R5_ARG3, R5_ARG3, 2);
1689 
1690       __ cmpwi(CCR0, R5_ARG3, 7);
1691       __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1692 
1693       __ srdi(tmp1, R5_ARG3, 3);
1694       __ andi(R5_ARG3, R5_ARG3, 7);
1695       __ mtctr(tmp1);
1696 
1697       __ bind(l_4);
      // Use unrolled version for mass copying (copy 8 elements at a time).
      // Load feeding store gets zero latency on Power6, but not on Power5.
      // Therefore, the following sequence works well on both.
1701       __ addi(R3_ARG1, R3_ARG1, -32);
1702       __ addi(R4_ARG2, R4_ARG2, -32);
1703       __ ld(tmp4, 24, R3_ARG1);
1704       __ ld(tmp3, 16, R3_ARG1);
1705       __ ld(tmp2, 8, R3_ARG1);
1706       __ ld(tmp1, 0, R3_ARG1);
1707       __ std(tmp4, 24, R4_ARG2);
1708       __ std(tmp3, 16, R4_ARG2);
1709       __ std(tmp2, 8, R4_ARG2);
1710       __ std(tmp1, 0, R4_ARG2);
1711       __ bdnz(l_4);
1712 
1713       __ cmpwi(CCR0, R5_ARG3, 0);
1714       __ beq(CCR0, l_6);
1715 
1716       __ bind(l_5);
1717       __ mtctr(R5_ARG3);
1718       __ bind(l_3);
1719       __ lwz(R0, -4, R3_ARG1);
1720       __ stw(R0, -4, R4_ARG2);
1721       __ addi(R3_ARG1, R3_ARG1, -4);
1722       __ addi(R4_ARG2, R4_ARG2, -4);
1723       __ bdnz(l_3);
1724 
1725       __ bind(l_6);
1726     }
1727   }
1728 
1729   // Generate stub for conjoint int copy.  If "aligned" is true, the
1730   // "from" and "to" addresses are assumed to be heapword aligned.
1731   //
1732   // Arguments for generated stub:
1733   //      from:  R3_ARG1
1734   //      to:    R4_ARG2
1735   //      count: R5_ARG3 treated as signed
1736   //
1737   address generate_conjoint_int_copy(bool aligned, const char * name) {
1738     StubCodeMark mark(this, "StubRoutines", name);
1739     address start = __ function_entry();
1740 
1741 #if defined(ABI_ELFv2)
1742     address nooverlap_target = aligned ?
1743       StubRoutines::arrayof_jint_disjoint_arraycopy() :
1744       StubRoutines::jint_disjoint_arraycopy();
1745 #else
1746     address nooverlap_target = aligned ?
1747       ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
1748       ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
1749 #endif
1750 
1751     array_overlap_test(nooverlap_target, 2);
1752 
1753     generate_conjoint_int_copy_core(aligned);
1754 
1755     __ blr();
1756 
1757     return start;
1758   }
1759 
1760   // Generate core code for disjoint long copy (and oop copy on
1761   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1762   // are assumed to be heapword aligned.
1763   //
1764   // Arguments:
1765   //      from:  R3_ARG1
1766   //      to:    R4_ARG2
1767   //      count: R5_ARG3 treated as signed
1768   //
1769   void generate_disjoint_long_copy_core(bool aligned) {
1770     Register tmp1 = R6_ARG4;
1771     Register tmp2 = R7_ARG5;
1772     Register tmp3 = R8_ARG6;
1773     Register tmp4 = R0;
1774 
1775     Label l_1, l_2, l_3, l_4;
1776 
1777     { // FasterArrayCopy
1778       __ cmpwi(CCR0, R5_ARG3, 3);
1779       __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1780 
1781       __ srdi(tmp1, R5_ARG3, 2);
1782       __ andi_(R5_ARG3, R5_ARG3, 3);
1783       __ mtctr(tmp1);
1784 
1785       __ bind(l_4);
      // Use unrolled version for mass copying (copy 4 elements at a time).
      // Load feeding store gets zero latency on Power6, but not on Power5.
      // Therefore, the following sequence works well on both.
1789       __ ld(tmp1, 0, R3_ARG1);
1790       __ ld(tmp2, 8, R3_ARG1);
1791       __ ld(tmp3, 16, R3_ARG1);
1792       __ ld(tmp4, 24, R3_ARG1);
1793       __ std(tmp1, 0, R4_ARG2);
1794       __ std(tmp2, 8, R4_ARG2);
1795       __ std(tmp3, 16, R4_ARG2);
1796       __ std(tmp4, 24, R4_ARG2);
1797       __ addi(R3_ARG1, R3_ARG1, 32);
1798       __ addi(R4_ARG2, R4_ARG2, 32);
1799       __ bdnz(l_4);
1800     }
1801 
1802     // copy 1 element at a time
1803     __ bind(l_3);
1804     __ cmpwi(CCR0, R5_ARG3, 0);
1805     __ beq(CCR0, l_1);
1806 
1807     { // FasterArrayCopy
1808       __ mtctr(R5_ARG3);
1809       __ addi(R3_ARG1, R3_ARG1, -8);
1810       __ addi(R4_ARG2, R4_ARG2, -8);
1811 
1812       __ bind(l_2);
1813       __ ldu(R0, 8, R3_ARG1);
1814       __ stdu(R0, 8, R4_ARG2);
1815       __ bdnz(l_2);
1816 
1817     }
1818     __ bind(l_1);
1819   }
1820 
1821   // Generate stub for disjoint long copy.  If "aligned" is true, the
1822   // "from" and "to" addresses are assumed to be heapword aligned.
1823   //
1824   // Arguments for generated stub:
1825   //      from:  R3_ARG1
1826   //      to:    R4_ARG2
1827   //      count: R5_ARG3 treated as signed
1828   //
1829   address generate_disjoint_long_copy(bool aligned, const char * name) {
1830     StubCodeMark mark(this, "StubRoutines", name);
1831     address start = __ function_entry();
1832     generate_disjoint_long_copy_core(aligned);
1833     __ blr();
1834 
1835     return start;
1836   }
1837 
1838   // Generate core code for conjoint long copy (and oop copy on
1839   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1840   // are assumed to be heapword aligned.
1841   //
1842   // Arguments:
1843   //      from:  R3_ARG1
1844   //      to:    R4_ARG2
1845   //      count: R5_ARG3 treated as signed
1846   //
1847   void generate_conjoint_long_copy_core(bool aligned) {
1848     Register tmp1 = R6_ARG4;
1849     Register tmp2 = R7_ARG5;
1850     Register tmp3 = R8_ARG6;
1851     Register tmp4 = R0;
1852 
1853     Label l_1, l_2, l_3, l_4, l_5;
1854 
1855     __ cmpwi(CCR0, R5_ARG3, 0);
1856     __ beq(CCR0, l_1);
1857 
1858     { // FasterArrayCopy
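      // Point from/to one past the end of the region (count * 8 bytes) and
      // copy backwards; the srdi below restores the element count.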
1859       __ sldi(R5_ARG3, R5_ARG3, 3);
1860       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1861       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1862       __ srdi(R5_ARG3, R5_ARG3, 3);
1863 
1864       __ cmpwi(CCR0, R5_ARG3, 3);
1865       __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
1866 
1867       __ srdi(tmp1, R5_ARG3, 2);
1868       __ andi(R5_ARG3, R5_ARG3, 3);
1869       __ mtctr(tmp1);
1870 
1871       __ bind(l_4);
      // Use unrolled version for mass copying (copy 4 elements at a time).
      // Load feeding store gets zero latency on Power6, but not on Power5.
      // Therefore, the following sequence works well on both.
1875       __ addi(R3_ARG1, R3_ARG1, -32);
1876       __ addi(R4_ARG2, R4_ARG2, -32);
1877       __ ld(tmp4, 24, R3_ARG1);
1878       __ ld(tmp3, 16, R3_ARG1);
1879       __ ld(tmp2, 8, R3_ARG1);
1880       __ ld(tmp1, 0, R3_ARG1);
1881       __ std(tmp4, 24, R4_ARG2);
1882       __ std(tmp3, 16, R4_ARG2);
1883       __ std(tmp2, 8, R4_ARG2);
1884       __ std(tmp1, 0, R4_ARG2);
1885       __ bdnz(l_4);
1886 
1887       __ cmpwi(CCR0, R5_ARG3, 0);
1888       __ beq(CCR0, l_1);
1889 
1890       __ bind(l_5);
1891       __ mtctr(R5_ARG3);
1892       __ bind(l_3);
1893       __ ld(R0, -8, R3_ARG1);
1894       __ std(R0, -8, R4_ARG2);
1895       __ addi(R3_ARG1, R3_ARG1, -8);
1896       __ addi(R4_ARG2, R4_ARG2, -8);
1897       __ bdnz(l_3);
1898 
1899     }
1900     __ bind(l_1);
1901   }
1902 
1903   // Generate stub for conjoint long copy.  If "aligned" is true, the
1904   // "from" and "to" addresses are assumed to be heapword aligned.
1905   //
1906   // Arguments for generated stub:
1907   //      from:  R3_ARG1
1908   //      to:    R4_ARG2
1909   //      count: R5_ARG3 treated as signed
1910   //
1911   address generate_conjoint_long_copy(bool aligned, const char * name) {
1912     StubCodeMark mark(this, "StubRoutines", name);
1913     address start = __ function_entry();
1914 
1915 #if defined(ABI_ELFv2)
1916     address nooverlap_target = aligned ?
1917       StubRoutines::arrayof_jlong_disjoint_arraycopy() :
1918       StubRoutines::jlong_disjoint_arraycopy();
1919 #else
1920     address nooverlap_target = aligned ?
1921       ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
1922       ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
1923 #endif
1924 
1925     array_overlap_test(nooverlap_target, 3);
1926     generate_conjoint_long_copy_core(aligned);
1927 
1928     __ blr();
1929 
1930     return start;
1931   }
1932 
1933   // Generate stub for conjoint oop copy.  If "aligned" is true, the
1934   // "from" and "to" addresses are assumed to be heapword aligned.
1935   //
1936   // Arguments for generated stub:
1937   //      from:  R3_ARG1
1938   //      to:    R4_ARG2
1939   //      count: R5_ARG3 treated as signed
1940   //      dest_uninitialized: G1 support
1941   //
1942   address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1943     StubCodeMark mark(this, "StubRoutines", name);
1944 
1945     address start = __ function_entry();
1946 
1947 #if defined(ABI_ELFv2)
1948     address nooverlap_target = aligned ?
1949       StubRoutines::arrayof_oop_disjoint_arraycopy() :
1950       StubRoutines::oop_disjoint_arraycopy();
1951 #else
1952     address nooverlap_target = aligned ?
1953       ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
1954       ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
1955 #endif
1956 
1957     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1958 
1959     // Save arguments.
1960     __ mr(R9_ARG7, R4_ARG2);
1961     __ mr(R10_ARG8, R5_ARG3);
1962 
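    // The element size depends on UseCompressedOops: 4-byte narrow oops or
    // 8-byte oops. Pick the matching overlap-test shift and copy core.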
1963     if (UseCompressedOops) {
1964       array_overlap_test(nooverlap_target, 2);
1965       generate_conjoint_int_copy_core(aligned);
1966     } else {
1967       array_overlap_test(nooverlap_target, 3);
1968       generate_conjoint_long_copy_core(aligned);
1969     }
1970 
1971     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
1972     return start;
1973   }
1974 
1975   // Generate stub for disjoint oop copy.  If "aligned" is true, the
1976   // "from" and "to" addresses are assumed to be heapword aligned.
1977   //
1978   // Arguments for generated stub:
1979   //      from:  R3_ARG1
1980   //      to:    R4_ARG2
1981   //      count: R5_ARG3 treated as signed
1982   //      dest_uninitialized: G1 support
1983   //
1984   address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1985     StubCodeMark mark(this, "StubRoutines", name);
1986     address start = __ function_entry();
1987 
1988     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1989 
1990     // save some arguments, disjoint_long_copy_core destroys them.
1991     // needed for post barrier
1992     __ mr(R9_ARG7, R4_ARG2);
1993     __ mr(R10_ARG8, R5_ARG3);
1994 
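    // Element size depends on UseCompressedOops (4 vs. 8 bytes), as in the
    // conjoint oop stub above.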
1995     if (UseCompressedOops) {
1996       generate_disjoint_int_copy_core(aligned);
1997     } else {
1998       generate_disjoint_long_copy_core(aligned);
1999     }
2000 
2001     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
2002 
2003     return start;
2004   }
2005 
2006   // Arguments for generated stub (little endian only):
2007   //   R3_ARG1   - source byte array address
2008   //   R4_ARG2   - destination byte array address
2009   //   R5_ARG3   - round key array
2010   address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions support");
2012     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
2013 
2014     address start = __ function_entry();
2015 
2016     Label L_doLast;
2017 
2018     Register from           = R3_ARG1;  // source array address
2019     Register to             = R4_ARG2;  // destination array address
2020     Register key            = R5_ARG3;  // round key array
2021 
2022     Register keylen         = R8;
2023     Register temp           = R9;
2024     Register keypos         = R10;
2025     Register hex            = R11;
2026     Register fifteen        = R12;
2027 
2028     VectorRegister vRet     = VR0;
2029 
2030     VectorRegister vKey1    = VR1;
2031     VectorRegister vKey2    = VR2;
2032     VectorRegister vKey3    = VR3;
2033     VectorRegister vKey4    = VR4;
2034 
2035     VectorRegister fromPerm = VR5;
2036     VectorRegister keyPerm  = VR6;
2037     VectorRegister toPerm   = VR7;
2038     VectorRegister fSplt    = VR8;
2039 
2040     VectorRegister vTmp1    = VR9;
2041     VectorRegister vTmp2    = VR10;
2042     VectorRegister vTmp3    = VR11;
2043     VectorRegister vTmp4    = VR12;
2044 
2045     VectorRegister vLow     = VR13;
2046     VectorRegister vHigh    = VR14;
2047 
2048     __ li              (hex, 16);
2049     __ li              (fifteen, 15);
2050     __ vspltisb        (fSplt, 0x0f);
2051 
    // load unaligned from[0-15] into vRet
2053     __ lvx             (vRet, from);
2054     __ lvx             (vTmp1, fifteen, from);
2055     __ lvsl            (fromPerm, from);
2056     __ vxor            (fromPerm, fromPerm, fSplt);
2057     __ vperm           (vRet, vRet, vTmp1, fromPerm);
2058 
2059     // load keylen (44 or 52 or 60)
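    // keylen is the expanded key length in 32-bit words: 44, 52 or 60 for
    // AES-128, AES-192 and AES-256 respectively.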
2060     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2061 
    // Set up a permute vector (keyPerm) used below to assemble each 16-byte
    // round key from two lvx loads of the (possibly unaligned) key array.
2063     __ lvsr            (keyPerm, key);
2064     __ vxor            (vTmp2, vTmp2, vTmp2);
2065     __ vspltisb        (vTmp2, -16);
2066     __ vrld            (keyPerm, keyPerm, vTmp2);
2067     __ vrld            (keyPerm, keyPerm, vTmp2);
2068     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
2069 
2070     // load the 1st round key to vKey1
2071     __ li              (keypos, 0);
2072     __ lvx             (vKey1, keypos, key);
2073     __ addi            (keypos, keypos, 16);
2074     __ lvx             (vTmp1, keypos, key);
2075     __ vperm           (vKey1, vTmp1, vKey1, keyPerm);
2076 
2077     // 1st round
2078     __ vxor (vRet, vRet, vKey1);
2079 
2080     // load the 2nd round key to vKey1
2081     __ addi            (keypos, keypos, 16);
2082     __ lvx             (vTmp2, keypos, key);
2083     __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2084 
2085     // load the 3rd round key to vKey2
2086     __ addi            (keypos, keypos, 16);
2087     __ lvx             (vTmp1, keypos, key);
2088     __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2089 
2090     // load the 4th round key to vKey3
2091     __ addi            (keypos, keypos, 16);
2092     __ lvx             (vTmp2, keypos, key);
2093     __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
2094 
2095     // load the 5th round key to vKey4
2096     __ addi            (keypos, keypos, 16);
2097     __ lvx             (vTmp1, keypos, key);
2098     __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
2099 
2100     // 2nd - 5th rounds
2101     __ vcipher (vRet, vRet, vKey1);
2102     __ vcipher (vRet, vRet, vKey2);
2103     __ vcipher (vRet, vRet, vKey3);
2104     __ vcipher (vRet, vRet, vKey4);
2105 
2106     // load the 6th round key to vKey1
2107     __ addi            (keypos, keypos, 16);
2108     __ lvx             (vTmp2, keypos, key);
2109     __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2110 
2111     // load the 7th round key to vKey2
2112     __ addi            (keypos, keypos, 16);
2113     __ lvx             (vTmp1, keypos, key);
2114     __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2115 
2116     // load the 8th round key to vKey3
2117     __ addi            (keypos, keypos, 16);
2118     __ lvx             (vTmp2, keypos, key);
2119     __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
2120 
2121     // load the 9th round key to vKey4
2122     __ addi            (keypos, keypos, 16);
2123     __ lvx             (vTmp1, keypos, key);
2124     __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
2125 
2126     // 6th - 9th rounds
2127     __ vcipher (vRet, vRet, vKey1);
2128     __ vcipher (vRet, vRet, vKey2);
2129     __ vcipher (vRet, vRet, vKey3);
2130     __ vcipher (vRet, vRet, vKey4);
2131 
2132     // load the 10th round key to vKey1
2133     __ addi            (keypos, keypos, 16);
2134     __ lvx             (vTmp2, keypos, key);
2135     __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2136 
2137     // load the 11th round key to vKey2
2138     __ addi            (keypos, keypos, 16);
2139     __ lvx             (vTmp1, keypos, key);
2140     __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2141 
2142     // if all round keys are loaded, skip next 4 rounds
2143     __ cmpwi           (CCR0, keylen, 44);
2144     __ beq             (CCR0, L_doLast);
2145 
2146     // 10th - 11th rounds
2147     __ vcipher (vRet, vRet, vKey1);
2148     __ vcipher (vRet, vRet, vKey2);
2149 
2150     // load the 12th round key to vKey1
2151     __ addi            (keypos, keypos, 16);
2152     __ lvx             (vTmp2, keypos, key);
2153     __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2154 
2155     // load the 13th round key to vKey2
2156     __ addi            (keypos, keypos, 16);
2157     __ lvx             (vTmp1, keypos, key);
2158     __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2159 
2160     // if all round keys are loaded, skip next 2 rounds
2161     __ cmpwi           (CCR0, keylen, 52);
2162     __ beq             (CCR0, L_doLast);
2163 
2164     // 12th - 13th rounds
2165     __ vcipher (vRet, vRet, vKey1);
2166     __ vcipher (vRet, vRet, vKey2);
2167 
2168     // load the 14th round key to vKey1
2169     __ addi            (keypos, keypos, 16);
2170     __ lvx             (vTmp2, keypos, key);
2171     __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2172 
2173     // load the 15th round key to vKey2
2174     __ addi            (keypos, keypos, 16);
2175     __ lvx             (vTmp1, keypos, key);
2176     __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2177 
2178     __ bind(L_doLast);
2179 
2180     // last two rounds
2181     __ vcipher (vRet, vRet, vKey1);
2182     __ vcipherlast (vRet, vRet, vKey2);
2183 
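    // Store vRet to the possibly unaligned 'to' address: permute the result
    // into position and use vsel to merge it with the current contents of the
    // two aligned 16-byte blocks covering to[0..15], so bytes outside the
    // destination are left untouched.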
2184     __ neg             (temp, to);
2185     __ lvsr            (toPerm, temp);
2186     __ vspltisb        (vTmp2, -1);
2187     __ vxor            (vTmp1, vTmp1, vTmp1);
2188     __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
2189     __ vxor            (toPerm, toPerm, fSplt);
2190     __ lvx             (vTmp1, to);
2191     __ vperm           (vRet, vRet, vRet, toPerm);
2192     __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
2193     __ lvx             (vTmp4, fifteen, to);
2194     __ stvx            (vTmp1, to);
2195     __ vsel            (vRet, vRet, vTmp4, vTmp2);
2196     __ stvx            (vRet, fifteen, to);
2197 
2198     __ blr();
    return start;
2200   }
2201 
2202   // Arguments for generated stub (little endian only):
2203   //   R3_ARG1   - source byte array address
2204   //   R4_ARG2   - destination byte array address
2205   //   R5_ARG3   - K (key) in little endian int array
2206   address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions support");
2208     StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
2209 
2210     address start = __ function_entry();
2211 
2212     Label L_doLast;
2213     Label L_do44;
2214     Label L_do52;
2215     Label L_do60;
2216 
2217     Register from           = R3_ARG1;  // source array address
2218     Register to             = R4_ARG2;  // destination array address
2219     Register key            = R5_ARG3;  // round key array
2220 
2221     Register keylen         = R8;
2222     Register temp           = R9;
2223     Register keypos         = R10;
2224     Register hex            = R11;
2225     Register fifteen        = R12;
2226 
2227     VectorRegister vRet     = VR0;
2228 
2229     VectorRegister vKey1    = VR1;
2230     VectorRegister vKey2    = VR2;
2231     VectorRegister vKey3    = VR3;
2232     VectorRegister vKey4    = VR4;
2233     VectorRegister vKey5    = VR5;
2234 
2235     VectorRegister fromPerm = VR6;
2236     VectorRegister keyPerm  = VR7;
2237     VectorRegister toPerm   = VR8;
2238     VectorRegister fSplt    = VR9;
2239 
2240     VectorRegister vTmp1    = VR10;
2241     VectorRegister vTmp2    = VR11;
2242     VectorRegister vTmp3    = VR12;
2243     VectorRegister vTmp4    = VR13;
2244 
2245     VectorRegister vLow     = VR14;
2246     VectorRegister vHigh    = VR15;
2247 
2248     __ li              (hex, 16);
2249     __ li              (fifteen, 15);
2250     __ vspltisb        (fSplt, 0x0f);
2251 
    // load unaligned from[0-15] into vRet
2253     __ lvx             (vRet, from);
2254     __ lvx             (vTmp1, fifteen, from);
2255     __ lvsl            (fromPerm, from);
2256     __ vxor            (fromPerm, fromPerm, fSplt);
2257     __ vperm           (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]
2258 
2259     // load keylen (44 or 52 or 60)
2260     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2261 
    // Set up a permute vector (keyPerm) used below to assemble each 16-byte
    // round key from two lvx loads of the (possibly unaligned) key array.
2263     __ lvsr            (keyPerm, key);
2264     __ vxor            (vTmp2, vTmp2, vTmp2);
2265     __ vspltisb        (vTmp2, -16);
2266     __ vrld            (keyPerm, keyPerm, vTmp2);
2267     __ vrld            (keyPerm, keyPerm, vTmp2);
2268     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
2269 
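    // Decryption applies the round keys in reverse order, so they are loaded
    // from the end of the expanded key downwards; the starting byte offset
    // (240, 208 or 176) depends on the key length.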
2270     __ cmpwi           (CCR0, keylen, 44);
2271     __ beq             (CCR0, L_do44);
2272 
2273     __ cmpwi           (CCR0, keylen, 52);
2274     __ beq             (CCR0, L_do52);
2275 
    // load the 15th round key to vKey1
2277     __ li              (keypos, 240);
2278     __ lvx             (vTmp1, keypos, key);
2279     __ addi            (keypos, keypos, -16);
2280     __ lvx             (vTmp2, keypos, key);
2281     __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2282 
    // load the 14th round key to vKey2
2284     __ addi            (keypos, keypos, -16);
2285     __ lvx             (vTmp1, keypos, key);
2286     __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
2287 
    // load the 13th round key to vKey3
2289     __ addi            (keypos, keypos, -16);
2290     __ lvx             (vTmp2, keypos, key);
2291     __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
2292 
    // load the 12th round key to vKey4
2294     __ addi            (keypos, keypos, -16);
2295     __ lvx             (vTmp1, keypos, key);
2296     __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
2297 
    // load the 11th round key to vKey5
2299     __ addi            (keypos, keypos, -16);
2300     __ lvx             (vTmp2, keypos, key);
2301     __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
2302 
2303     // 1st - 5th rounds
2304     __ vxor            (vRet, vRet, vKey1);
2305     __ vncipher        (vRet, vRet, vKey2);
2306     __ vncipher        (vRet, vRet, vKey3);
2307     __ vncipher        (vRet, vRet, vKey4);
2308     __ vncipher        (vRet, vRet, vKey5);
2309 
2310     __ b               (L_doLast);
2311 
2312     __ bind            (L_do52);
2313 
    // load the 13th round key to vKey1
2315     __ li              (keypos, 208);
2316     __ lvx             (vTmp1, keypos, key);
2317     __ addi            (keypos, keypos, -16);
2318     __ lvx             (vTmp2, keypos, key);
2319     __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2320 
    // load the 12th round key to vKey2
2322     __ addi            (keypos, keypos, -16);
2323     __ lvx             (vTmp1, keypos, key);
2324     __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
2325 
    // load the 11th round key to vKey3
2327     __ addi            (keypos, keypos, -16);
2328     __ lvx             (vTmp2, keypos, key);
2329     __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
2330 
2331     // 1st - 3rd rounds
2332     __ vxor            (vRet, vRet, vKey1);
2333     __ vncipher        (vRet, vRet, vKey2);
2334     __ vncipher        (vRet, vRet, vKey3);
2335 
2336     __ b               (L_doLast);
2337 
2338     __ bind            (L_do44);
2339 
    // load the 11th round key to vKey1
2341     __ li              (keypos, 176);
2342     __ lvx             (vTmp1, keypos, key);
2343     __ addi            (keypos, keypos, -16);
2344     __ lvx             (vTmp2, keypos, key);
2345     __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2346 
2347     // 1st round
2348     __ vxor            (vRet, vRet, vKey1);
2349 
2350     __ bind            (L_doLast);
2351 
    // load the 10th round key to vKey1
2353     __ addi            (keypos, keypos, -16);
2354     __ lvx             (vTmp1, keypos, key);
2355     __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2356 
    // load the 9th round key to vKey2
2358     __ addi            (keypos, keypos, -16);
2359     __ lvx             (vTmp2, keypos, key);
2360     __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2361 
    // load the 8th round key to vKey3
2363     __ addi            (keypos, keypos, -16);
2364     __ lvx             (vTmp1, keypos, key);
2365     __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
2366 
    // load the 7th round key to vKey4
2368     __ addi            (keypos, keypos, -16);
2369     __ lvx             (vTmp2, keypos, key);
2370     __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
2371 
    // load the 6th round key to vKey5
2373     __ addi            (keypos, keypos, -16);
2374     __ lvx             (vTmp1, keypos, key);
2375     __ vperm           (vKey5, vTmp2, vTmp1, keyPerm);
2376 
2377     // last 10th - 6th rounds
2378     __ vncipher        (vRet, vRet, vKey1);
2379     __ vncipher        (vRet, vRet, vKey2);
2380     __ vncipher        (vRet, vRet, vKey3);
2381     __ vncipher        (vRet, vRet, vKey4);
2382     __ vncipher        (vRet, vRet, vKey5);
2383 
    // load the 5th round key to vKey1
2385     __ addi            (keypos, keypos, -16);
2386     __ lvx             (vTmp2, keypos, key);
2387     __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2388 
    // load the 4th round key to vKey2
2390     __ addi            (keypos, keypos, -16);
2391     __ lvx             (vTmp1, keypos, key);
2392     __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
2393 
    // load the 3rd round key to vKey3
2395     __ addi            (keypos, keypos, -16);
2396     __ lvx             (vTmp2, keypos, key);
2397     __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
2398 
    // load the 2nd round key to vKey4
2400     __ addi            (keypos, keypos, -16);
2401     __ lvx             (vTmp1, keypos, key);
2402     __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
2403 
    // load the 1st round key to vKey5
2405     __ addi            (keypos, keypos, -16);
2406     __ lvx             (vTmp2, keypos, key);
2407     __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
2408 
    // last 5th - 1st rounds
2410     __ vncipher        (vRet, vRet, vKey1);
2411     __ vncipher        (vRet, vRet, vKey2);
2412     __ vncipher        (vRet, vRet, vKey3);
2413     __ vncipher        (vRet, vRet, vKey4);
2414     __ vncipherlast    (vRet, vRet, vKey5);
2415 
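    // Write the result back to the possibly unaligned 'to' address using the
    // same read-modify-write sequence as in the encrypt stub above.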
2416     __ neg             (temp, to);
2417     __ lvsr            (toPerm, temp);
2418     __ vspltisb        (vTmp2, -1);
2419     __ vxor            (vTmp1, vTmp1, vTmp1);
2420     __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
2421     __ vxor            (toPerm, toPerm, fSplt);
2422     __ lvx             (vTmp1, to);
2423     __ vperm           (vRet, vRet, vRet, toPerm);
2424     __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
2425     __ lvx             (vTmp4, fifteen, to);
2426     __ stvx            (vTmp1, to);
2427     __ vsel            (vRet, vRet, vTmp4, vTmp2);
2428     __ stvx            (vRet, fifteen, to);
2429 
2430     __ blr();
    return start;
2432   }
2433 
2434   void generate_arraycopy_stubs() {
2435     // Note: the disjoint stubs must be generated first, some of
2436     // the conjoint stubs use them.
2437 
2438     // non-aligned disjoint versions
2439     StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2440     StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2441     StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
2442     StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
2443     StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
2444     StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
2445 
2446     // aligned disjoint versions
2447     StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
2448     StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
2449     StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
2450     StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
2451     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
2453 
2454     // non-aligned conjoint versions
2455     StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2456     StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
2457     StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
2458     StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
2459     StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
2460     StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
2461 
2462     // aligned conjoint versions
2463     StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
2464     StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
2465     StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
2466     StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
2467     StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
    StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
2469 
2470     // fill routines
2471     StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
2472     StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
2473     StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
2474     StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
2475     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2476     StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
2477   }
2478 
2479   // Safefetch stubs.
2480   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
2481     // safefetch signatures:
2482     //   int      SafeFetch32(int*      adr, int      errValue);
2483     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
2484     //
2485     // arguments:
2486     //   R3_ARG1 = adr
2487     //   R4_ARG2 = errValue
2488     //
2489     // result:
2490     //   R3_RET  = *adr or errValue
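    //
    // If the load at fault_pc faults, the signal handler resumes execution at
    // continuation_pc; the faulting load never writes R4_ARG2, so the caller's
    // errValue is returned instead, e.g. SafeFetch32(adr, -1) yields *adr, or
    // -1 if adr is not readable.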
2491 
2492     StubCodeMark mark(this, "StubRoutines", name);
2493 
2494     // Entry point, pc or function descriptor.
2495     *entry = __ function_entry();
2496 
2497     // Load *adr into R4_ARG2, may fault.
2498     *fault_pc = __ pc();
2499     switch (size) {
2500       case 4:
        // int32_t, sign-extended
2502         __ lwa(R4_ARG2, 0, R3_ARG1);
2503         break;
2504       case 8:
2505         // int64_t
2506         __ ld(R4_ARG2, 0, R3_ARG1);
2507         break;
2508       default:
2509         ShouldNotReachHere();
2510     }
2511 
2512     // return errValue or *adr
2513     *continuation_pc = __ pc();
2514     __ mr(R3_RET, R4_ARG2);
2515     __ blr();
2516   }
2517 
2518   /**
2519    * Arguments:
2520    *
2521    * Inputs:
2522    *   R3_ARG1    - int   crc
2523    *   R4_ARG2    - byte* buf
2524    *   R5_ARG3    - int   length (of buffer)
2525    *
2526    * scratch:
2527    *   R2, R6-R12
2528    *
   * Output:
2530    *   R3_RET     - int   crc result
2531    */
2532   // Compute CRC32 function.
2533   address generate_CRC32_updateBytes(const char* name) {
2534     __ align(CodeEntryAlignment);
2535     StubCodeMark mark(this, "StubRoutines", name);
2536     address start = __ function_entry();  // Remember stub start address (is rtn value).
2537 
2538     // arguments to kernel_crc32:
2539     const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
2540     const Register data    = R4_ARG2;  // source byte array
2541     const Register dataLen = R5_ARG3;  // #bytes to process
2542 
2543     const Register table   = R6;       // crc table address
2544 
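    // Two code paths: on little-endian machines with vpmsumb support, use the
    // vectorized kernel based on vpmsumd; otherwise fall back to the
    // table-driven one-word-at-a-time kernel.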
2545 #ifdef VM_LITTLE_ENDIAN
2546     if (VM_Version::has_vpmsumb()) {
2547       const Register constants    = R2;  // constants address
      const Register bconstants   = R8;  // Barrett constants address
2549 
2550       const Register t0      = R9;
2551       const Register t1      = R10;
2552       const Register t2      = R11;
2553       const Register t3      = R12;
2554       const Register t4      = R7;
2555 
2556       BLOCK_COMMENT("Stub body {");
2557       assert_different_registers(crc, data, dataLen, table);
2558 
2559       StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
2560       StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
2561       StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
2562 
2563       __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4);
2564 
2565       BLOCK_COMMENT("return");
2566       __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
2567       __ blr();
2568 
2569       BLOCK_COMMENT("} Stub body");
2570     } else
2571 #endif
2572     {
2573       const Register t0      = R2;
2574       const Register t1      = R7;
2575       const Register t2      = R8;
2576       const Register t3      = R9;
2577       const Register tc0     = R10;
2578       const Register tc1     = R11;
2579       const Register tc2     = R12;
2580 
2581       BLOCK_COMMENT("Stub body {");
2582       assert_different_registers(crc, data, dataLen, table);
2583 
2584       StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
2585 
2586       __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
2587 
2588       BLOCK_COMMENT("return");
2589       __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
2590       __ blr();
2591 
2592       BLOCK_COMMENT("} Stub body");
2593     }
2594 
2595     return start;
2596   }
2597 
2598   // Initialization
2599   void generate_initial() {
2600     // Generates all stubs and initializes the entry points
2601 
2602     // Entry points that exist in all platforms.
2603     // Note: This is code that could be shared among different platforms - however the
2604     // benefit seems to be smaller than the disadvantage of having a
2605     // much more complicated generator structure. See also comment in
2606     // stubRoutines.hpp.
2607 
2608     StubRoutines::_forward_exception_entry          = generate_forward_exception();
2609     StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
2610     StubRoutines::_catch_exception_entry            = generate_catch_exception();
2611 
2612     // Build this early so it's available for the interpreter.
2613     StubRoutines::_throw_StackOverflowError_entry   =
2614       generate_throw_exception("StackOverflowError throw_exception",
2615                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
2616 
2617     // CRC32 Intrinsics.
2618     if (UseCRC32Intrinsics) {
2619       StubRoutines::_crc_table_adr    = (address)StubRoutines::ppc64::_crc_table;
2620       StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
2621     }
2622   }
2623 
2624   void generate_all() {
2625     // Generates all stubs and initializes the entry points
2626 
2627     // These entry points require SharedInfo::stack0 to be set up in
2628     // non-core builds
2629     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
2630     // Handle IncompatibleClassChangeError in itable stubs.
2631     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
2632     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
2633 
2634     StubRoutines::_handler_for_unsafe_access_entry         = generate_handler_for_unsafe_access();
2635 
2636     // support for verify_oop (must happen after universe_init)
2637     StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
2638 
2639     // arraycopy stubs used by compilers
2640     generate_arraycopy_stubs();
2641 
2642     // Safefetch stubs.
2643     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
2644                                                        &StubRoutines::_safefetch32_fault_pc,
2645                                                        &StubRoutines::_safefetch32_continuation_pc);
2646     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
2647                                                        &StubRoutines::_safefetchN_fault_pc,
2648                                                        &StubRoutines::_safefetchN_continuation_pc);
2649 
2650     if (UseAESIntrinsics) {
2651       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
2652       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
2653     }
2654 
2655     if (UseMontgomeryMultiplyIntrinsic) {
2656       StubRoutines::_montgomeryMultiply
2657         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
2658     }
2659     if (UseMontgomerySquareIntrinsic) {
2660       StubRoutines::_montgomerySquare
2661         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
2662     }
2663   }
2664 
2665  public:
2666   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2667     // replace the standard masm with a special one:
2668     _masm = new MacroAssembler(code);
2669     if (all) {
2670       generate_all();
2671     } else {
2672       generate_initial();
2673     }
2674   }
2675 };
2676 
2677 void StubGenerator_generate(CodeBuffer* code, bool all) {
2678   StubGenerator g(code, all);
2679 }