/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#include "runtime/thread.inline.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

class StubGenerator: public StubCodeGenerator {
 private:

  // Call stubs are used to call Java from C
  //
  // Arguments:
  //
  //   R3  - call wrapper address     : address
  //   R4  - result                   : intptr_t*
  //   R5  - result type              : BasicType
  //   R6  - method                   : Method
  //   R7  - frame mgr entry point    : address
  //   R8  - parameter block          : intptr_t*
  //   R9  - parameter count in words : int
  //   R10 - thread                   : Thread*
  //
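  // For reference, the VM side calls this stub through a function pointer of
  // type CallStub (cf. the CallStub typedef in runtime/stubRoutines.hpp),
  // roughly:
  //
  //   typedef void (*CallStub)(address   link,                // R3
  //                            intptr_t* result,              // R4
  //                            BasicType result_type,         // R5
  //                            Method*   method,              // R6
  //                            address   entry_point,         // R7
  //                            intptr_t* parameters,          // R8
  //                            int       size_of_parameters,  // R9
  //                            TRAPS);                        // R10 (thread)
  //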
  address generate_call_stub(address& return_address) {
    // Set up a new C frame, copy Java arguments, call frame manager or
    // native_entry, and process result.

    StubCodeMark mark(this, "StubRoutines", "call_stub");

    address start = __ function_entry();

    // some sanity checks
    assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
    assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
    assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
    assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
    assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");

    Register r_arg_call_wrapper_addr        = R3;
    Register r_arg_result_addr              = R4;
    Register r_arg_result_type              = R5;
    Register r_arg_method                   = R6;
    Register r_arg_entry                    = R7;
    Register r_arg_thread                   = R10;

    Register r_temp                         = R24;
    Register r_top_of_arguments_addr        = R25;
    Register r_entryframe_fp                = R26;

    {
      // Stack on entry to call_stub:
      //
      //      F1      [C_FRAME]
      //              ...

      Register r_arg_argument_addr          = R8;
      Register r_arg_argument_count         = R9;
      Register r_frame_alignment_in_bytes   = R27;
      Register r_argument_addr              = R28;
      Register r_argumentcopy_addr          = R29;
      Register r_argument_size_in_bytes     = R30;
      Register r_frame_size                 = R23;

      Label arguments_copied;

      // Save LR/CR to caller's C_FRAME.
      __ save_LR_CR(R0);

      // Zero extend arg_argument_count.
      __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);

      // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
      __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));

      // Keep copy of our frame pointer (caller's SP).
      __ mr(r_entryframe_fp, R1_SP);

      BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
      // Push ENTRY_FRAME including arguments:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...

      // calculate frame size

      // unaligned size of arguments
      __ sldi(r_argument_size_in_bytes,
                  r_arg_argument_count, Interpreter::logStackElementSize);
      // arguments alignment (max 1 slot)
      // FIXME: use round_to() here
      __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
      __ sldi(r_frame_alignment_in_bytes,
              r_frame_alignment_in_bytes, Interpreter::logStackElementSize);

      // size = unaligned size of arguments + top abi's size
      __ addi(r_frame_size, r_argument_size_in_bytes,
              frame::top_ijava_frame_abi_size);
      // size += arguments alignment
      __ add(r_frame_size,
             r_frame_size, r_frame_alignment_in_bytes);
      // size += size of call_stub locals
      __ addi(r_frame_size,
              r_frame_size, frame::entry_frame_locals_size);
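
      // In C terms the frame size computed above is roughly
      //   frame_size = argument_count * wordSize                 // outgoing Java arguments
      //              + (argument_count & 1) * wordSize           // optional alignment slot
      //              + frame::top_ijava_frame_abi_size
      //              + frame::entry_frame_locals_size
      // which stays a multiple of 16, assuming the two frame:: constants are
      // themselves 16-byte aligned.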

      // push ENTRY_FRAME
      __ push_frame(r_frame_size, r_temp);

      // initialize call_stub locals (step 1)
      __ std(r_arg_call_wrapper_addr,
             _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
      __ std(r_arg_result_addr,
             _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ std(r_arg_result_type,
             _entry_frame_locals_neg(result_type), r_entryframe_fp);
      // we will save arguments_tos_address later


      BLOCK_COMMENT("Copy Java arguments");
      // copy Java arguments

      // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
      // FIXME: why not simply use SP+frame::top_ijava_frame_size?
      __ addi(r_top_of_arguments_addr,
              R1_SP, frame::top_ijava_frame_abi_size);
      __ add(r_top_of_arguments_addr,
             r_top_of_arguments_addr, r_frame_alignment_in_bytes);

      // any arguments to copy?
      __ cmpdi(CCR0, r_arg_argument_count, 0);
      __ beq(CCR0, arguments_copied);

      // prepare loop and copy arguments in reverse order
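      // Illustrative sketch of the loop below (arguments are copied in
      // reverse order, one wordSize slot at a time):
      //   intptr_t* src = arg_argument_addr + arg_argument_count - 1;  // last incoming argument
      //   intptr_t* dst = top_of_arguments_addr;                       // first outgoing slot
      //   for (int i = 0; i < arg_argument_count; i++) { *dst++ = *src--; }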
      {
        // init CTR with arg_argument_count
        __ mtctr(r_arg_argument_count);

        // let r_argumentcopy_addr point to last outgoing Java argument
        __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);

        // let r_argument_addr point to last incoming java argument
        __ add(r_argument_addr,
               r_arg_argument_addr, r_argument_size_in_bytes);
        __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);

        // now loop while CTR > 0 and copy arguments
        {
          Label next_argument;
          __ bind(next_argument);

          __ ld(r_temp, 0, r_argument_addr);
          // argument_addr--;
          __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
          __ std(r_temp, 0, r_argumentcopy_addr);
          // argumentcopy_addr++;
          __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);

          __ bdnz(next_argument);
        }
      }

      // Arguments copied, continue.
      __ bind(arguments_copied);
    }

    {
      BLOCK_COMMENT("Call frame manager or native entry.");
      // Call frame manager or native entry.
      Register r_new_arg_entry = R14; // PPC_state;
      assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                 r_arg_method, r_arg_thread);

      __ mr(r_new_arg_entry, r_arg_entry);

      // Register state on entry to frame manager / native entry:
      //
      //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
      //   R19_method  -  Method
      //   R16_thread  -  JavaThread*

      // Tos must point to last argument - element_size.
#ifdef CC_INTERP
      const Register tos = R17_tos;
#else
      const Register tos = R15_esp;
#endif
      __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);

      // initialize call_stub locals (step 2)
      // now save tos as arguments_tos_address
      __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);

      // load argument registers for call
      __ mr(R19_method, r_arg_method);
      __ mr(R16_thread, r_arg_thread);
      assert(tos != r_arg_method, "trashed r_arg_method");
      assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");

      // Set R15_prev_state to 0 for simplifying checks in callee.
#ifdef CC_INTERP
      __ li(R15_prev_state, 0);
#else
      __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif
      // Stack on entry to frame manager / native entry:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //

      // global toc register
      __ load_const(R29, MacroAssembler::global_toc(), R11_scratch1);

      // Load narrow oop base.
      __ reinit_heapbase(R30, R11_scratch1);

      // Remember the senderSP so the interpreter can pop c2i arguments off of the stack
      // when called via a c2i.

      // Pass initial_caller_sp to framemanager.
      __ mr(R21_tmp1, R1_SP);

      // Do a light-weight C-call here, r_new_arg_entry holds the address
      // of the interpreter entry point (frame manager or native entry)
      // and save runtime-value of LR in return_address.
      assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
             "trashed r_new_arg_entry");
      return_address = __ call_stub(r_new_arg_entry);
    }

    {
      BLOCK_COMMENT("Returned from frame manager or native entry.");
      // Returned from frame manager or native entry.
      // Now pop frame, process result, and return to caller.

      // Stack on exit from frame manager / native entry:
      //
      //      F0      [ABI]
      //              ...
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //
      // Just pop the topmost frame ...
      //

      Label ret_is_object;
      Label ret_is_long;
      Label ret_is_float;
      Label ret_is_double;

      Register r_entryframe_fp = R30;
      Register r_lr            = R7_ARG5;
      Register r_cr            = R8_ARG6;

      // Reload some volatile registers which we've spilled before the call
      // to frame manager / native entry.
      // Access all locals via frame pointer, because we know nothing about
      // the topmost frame's size.
      __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
      assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
      __ ld(r_arg_result_addr,
            _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ ld(r_arg_result_type,
            _entry_frame_locals_neg(result_type), r_entryframe_fp);
      __ ld(r_cr, _abi(cr), r_entryframe_fp);
      __ ld(r_lr, _abi(lr), r_entryframe_fp);

      // pop frame and restore non-volatiles, LR and CR
      __ mr(R1_SP, r_entryframe_fp);
      __ mtcr(r_cr);
      __ mtlr(r_lr);

      // Store result depending on type. Everything that is not
      // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
      __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
      __ cmpwi(CCR1, r_arg_result_type, T_LONG);
      __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
      __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
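      // The four compares above target distinct CR fields, so the non-volatile
      // register restore below can be scheduled before any of the branches.
      // Conceptually this implements:
      //   switch (result_type) {
      //     case T_OBJECT: case T_LONG: case T_FLOAT: case T_DOUBLE: ...
      //     default: /* store as T_INT */
      //   }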

      // restore non-volatile registers
      __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));


      // Stack on exit from call_stub:
      //
      //      0       [C_FRAME]
      //              ...
      //
      //  no call_stub frames left.

      // All non-volatiles have been restored at this point!!
      assert(R3_RET == R3, "R3_RET should be R3");

      __ beq(CCR0, ret_is_object);
      __ beq(CCR1, ret_is_long);
      __ beq(CCR5, ret_is_float);
      __ beq(CCR6, ret_is_double);

      // default:
      __ stw(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_OBJECT:
      __ bind(ret_is_object);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_LONG:
      __ bind(ret_is_long);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_FLOAT:
      __ bind(ret_is_float);
      __ stfs(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_DOUBLE:
      __ bind(ret_is_double);
      __ stfd(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller
    }

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();

    // Registers alive
    //
    //  R16_thread
    //  R3_ARG1 - address of pending exception
    //  R4_ARG2 - return address in call stub

    const Register exception_file = R21_tmp1;
    const Register exception_line = R22_tmp2;

    __ load_const(exception_file, (void*)__FILE__);
    __ load_const(exception_line, (void*)__LINE__);

    __ std(R3_ARG1, thread_(pending_exception));
    // store into `char *'
    __ std(exception_file, thread_(exception_file));
    // store into `int'
    __ stw(exception_line, thread_(exception_line));

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    __ mtlr(R4_ARG2);
    // continue in call stub
    __ blr();

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

#if !defined(PRODUCT)
    if (VerifyOops) {
      // Get pending exception oop.
      __ ld(R3_ARG1,
                in_bytes(Thread::pending_exception_offset()),
                R16_thread);
      // Make sure that this code is only executed if there is a pending exception.
      {
        Label L;
        __ cmpdi(CCR0, R3_ARG1, 0);
        __ bne(CCR0, L);
        __ stop("StubRoutines::forward exception: no pending exception (1)");
        __ bind(L);
      }
      __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
    }
#endif

    // Save LR/CR and copy exception pc (LR) into R4_ARG2.
    __ save_LR_CR(R4_ARG2);
    __ push_frame_reg_args(0, R0);
    // Find exception handler.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                     SharedRuntime::exception_handler_for_return_address),
                    R16_thread,
                    R4_ARG2);
    // Copy handler's address.
    __ mtctr(R3_RET);
    __ pop_frame();
    __ restore_LR_CR(R0);

    // Set up the arguments for the exception handler:
    //  - R3_ARG1: exception oop
    //  - R4_ARG2: exception pc.

    // Load pending exception oop.
    __ ld(R3_ARG1,
              in_bytes(Thread::pending_exception_offset()),
              R16_thread);

    // The exception pc is the return address in the caller.
    // Must load it into R4_ARG2.
    __ mflr(R4_ARG2);

#ifdef ASSERT
    // Make sure exception is set.
    {
      Label L;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Clear the pending exception.
    __ li(R0, 0);
    __ std(R0,
               in_bytes(Thread::pending_exception_offset()),
               R16_thread);
    // Jump to exception handler.
    __ bctr();

    return start;
  }

#undef __
#define __ masm->
  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Only callee-saved registers are preserved (through the
  // normal register window / RegisterMap handling).  If the compiler
  // needs all registers to be preserved between the fault point and
  // the exception handler then it must assume responsibility for that
  // in AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  //
  // Note that we generate only this stub into a RuntimeStub, because
  // it needs to be properly traversed and ignored during GC, so we
  // change the meaning of the "__" macro within this method.
  //
  // Note: the routine set_pc_not_at_call_for_caller in
  // SharedRuntime.cpp requires that this code be generated into a
  // RuntimeStub.
  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
                                   Register arg1 = noreg, Register arg2 = noreg) {
    CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
    MacroAssembler* masm = new MacroAssembler(&code);

    OopMapSet* oop_maps  = new OopMapSet();
    int frame_size_in_bytes = frame::abi_reg_args_size;
    OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

    StubCodeMark mark(this, "StubRoutines", "throw_exception");

    address start = __ pc();

    __ save_LR_CR(R11_scratch1);

    // Push a frame.
    __ push_frame_reg_args(0, R11_scratch1);

    address frame_complete_pc = __ pc();

    if (restore_saved_exception_pc) {
      __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
    }

    // Note that we always have a runtime stub frame on the top of
    // stack by this point. Remember the offset of the instruction
    // whose address will be moved to R11_scratch1.
    address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);

    __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

    __ mr(R3_ARG1, R16_thread);
    if (arg1 != noreg) {
      __ mr(R4_ARG2, arg1);
    }
    if (arg2 != noreg) {
      __ mr(R5_ARG3, arg2);
    }
#if defined(ABI_ELFv2)
    __ call_c(runtime_entry, relocInfo::none);
#else
    __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif

    // Set an oopmap for the call site.
    oop_maps->add_gc_map((int)(gc_map_pc - start), map);

    __ reset_last_Java_frame();

#ifdef ASSERT
    // Make sure that this code is only executed if there is a pending
    // exception.
    {
      Label L;
      __ ld(R0,
                in_bytes(Thread::pending_exception_offset()),
                R16_thread);
      __ cmpdi(CCR0, R0, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::throw_exception: no pending exception");
      __ bind(L);
    }
#endif

    // Pop frame.
    __ pop_frame();

    __ restore_LR_CR(R11_scratch1);

    __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
    __ mtctr(R11_scratch1);
    __ bctr();

    // Create runtime stub with OopMap.
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name, &code,
                                    /*frame_complete=*/ (int)(frame_complete_pc - start),
                                    frame_size_in_bytes/wordSize,
                                    oop_maps,
                                    false);
    return stub->entry_point();
  }
#undef __
#define __ _masm->

  //  Generate G1 pre-write barrier for array.
  //
  //  Input:
  //     from     - register containing src address (only needed for spilling)
  //     to       - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  Kills:
  //     nothing
  //
  void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) {
    BarrierSet* const bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized.
        if (!dest_uninitialized) {
          const int spill_slots = 4 * wordSize;
          const int frame_size  = frame::abi_reg_args_size + spill_slots;
          Label filtered;

          // Is marking active?
          if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
            __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
          } else {
            guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
            __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
          }
          __ cmpdi(CCR0, Rtmp1, 0);
          __ beq(CCR0, filtered);

          __ save_LR_CR(R0);
          __ push_frame_reg_args(spill_slots, R0);
          __ std(from,  frame_size - 1 * wordSize, R1_SP);
          __ std(to,    frame_size - 2 * wordSize, R1_SP);
          __ std(count, frame_size - 3 * wordSize, R1_SP);

          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);

          __ ld(from,  frame_size - 1 * wordSize, R1_SP);
          __ ld(to,    frame_size - 2 * wordSize, R1_SP);
          __ ld(count, frame_size - 3 * wordSize, R1_SP);
          __ pop_frame();
          __ restore_LR_CR(R0);

          __ bind(filtered);
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  //  Generate CMS/G1 post-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers and R0 are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) {
    BarrierSet* const bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          if (branchToEnd) {
            __ save_LR_CR(R0);
            // We need this frame only to spill LR.
            __ push_frame_reg_args(0, R0);
            __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
            __ pop_frame();
            __ restore_LR_CR(R0);
          } else {
            // Tail call: fake call from stub caller by branching without linking.
            address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
            __ mr_if_needed(R3_ARG1, addr);
            __ mr_if_needed(R4_ARG2, count);
            __ load_const(R11, entry_point, R0);
            __ call_c_and_return_to_caller(R11);
          }
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          Label Lskip_loop, Lstore_loop;
          if (UseConcMarkSweepGC) {
            // TODO PPC port: contribute optimization / requires shared changes
            __ release();
          }

          CardTableModRefBS* const ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);
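          // Dirty-card range computed below, roughly:
          //   first_card = (size_t)addr >> card_shift;
          //   last_card  = ((size_t)addr + count * BytesPerHeapOop - BytesPerHeapOop) >> card_shift;
          //   clear byte_map_base[first_card .. last_card]  (last_card - first_card + 1 cards)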

          __ sldi(count, count, LogBytesPerHeapOop);
          __ addi(count, count, -BytesPerHeapOop);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srdi(addr, addr, CardTableModRefBS::card_shift);
          __ srdi(count, count, CardTableModRefBS::card_shift);
          __ subf(count, addr, count);
          assert_different_registers(R0, addr, count, tmp);
          __ load_const(tmp, (address)ct->byte_map_base);
          __ addic_(count, count, 1);
          __ beq(CCR0, Lskip_loop);
          __ li(R0, 0);
          __ mtctr(count);
          // Byte store loop
          __ bind(Lstore_loop);
          __ stbx(R0, tmp, addr);
          __ addi(addr, addr, 1);
          __ bdnz(Lstore_loop);
          __ bind(Lskip_loop);

          if (!branchToEnd) __ blr();
        }
        break;
      case BarrierSet::ModRef:
        if (!branchToEnd) __ blr();
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Support for void zero_words_aligned8(HeapWord* to, size_t count)
  //
  // Arguments:
  //   to:    R3_ARG1, must be 8-byte aligned
  //   count: R4_ARG2, number of 8-byte words (dwords) to clear
  //
  // Destroys:
  //   R3_ARG1-R7_ARG5, CTR, CCR0, CCR1
  address generate_zero_words_aligned8() {
    StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");

    // Implemented as in ClearArray.
    address start = __ function_entry();

    Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
    Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
    Register tmp1_reg       = R5_ARG3;
    Register tmp2_reg       = R6_ARG4;
    Register zero_reg       = R7_ARG5;

    // Procedure for large arrays (uses data cache block zero instruction).
    Label dwloop, fast, fastloop, restloop, lastdword, done;
    int cl_size=VM_Version::get_cache_line_size(), cl_dwords=cl_size>>3, cl_dwordaddr_bits=exact_log2(cl_dwords);
    int min_dcbz=2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
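    // Example, assuming a 128-byte cache line: cl_size = 128, cl_dwords = 16,
    // cl_dwordaddr_bits = 4. dcbz is only used when at least min_dcbz = 2 full
    // cache lines can be cleared with it; smaller requests fall back to stores.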

    // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
    __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
    __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
    __ load_const_optimized(zero_reg, 0L);      // Use as zero register.

    __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
    __ beq(CCR0, lastdword);                    // size <= 1
    __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
    __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
    __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000

    __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
    __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.

    __ beq(CCR0, fast);                         // already 128byte aligned
    __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
    __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)

    // Clear in first cache line dword-by-dword if not already 128byte aligned.
    __ bind(dwloop);
      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, 8);
    __ bdnz(dwloop);

    // clear 128byte blocks
    __ bind(fast);
    __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even

    __ mtctr(tmp1_reg);                         // load counter
    __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
    __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords

    __ bind(fastloop);
      __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, cl_size);
    __ bdnz(fastloop);

    //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
    __ beq(CCR0, lastdword);                    // rest<=1
    __ mtctr(tmp1_reg);                         // load counter

    // Clear rest.
    __ bind(restloop);
      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
      __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, 16);
    __ bdnz(restloop);

    __ bind(lastdword);
    __ beq(CCR1, done);
    __ std(zero_reg, 0, base_ptr_reg);
    __ bind(done);
    __ blr();                                   // return

    return start;
  }

  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
  //
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ function_entry();
    __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
    return start;
  }

#if !defined(PRODUCT)
  // Wrapper which calls oopDesc::is_oop_or_null()
  // Only called by MacroAssembler::verify_oop
  static void verify_oop_helper(const char* message, oop o) {
    if (!o->is_oop_or_null()) {
      fatal(message);
    }
    ++ StubRoutines::_verify_oop_count;
  }
#endif

  // Return address of code to be called from code generated by
  // MacroAssembler::verify_oop.
  //
  // Don't generate, rather use C++ code.
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");

    // this is actually a `FunctionDescriptor*'.
    address start = 0;

#if !defined(PRODUCT)
    start = CAST_FROM_FN_PTR(address, verify_oop_helper);
#endif

    return start;
  }

  // Fairer handling of safepoints for native methods.
  //
  // Generate code which reads from the polling page. This special handling is needed as the
  // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
  // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
  // to read from the safepoint polling page.
  address generate_load_from_poll() {
    StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
    address start = __ function_entry();
    __ unimplemented("StubRoutines::load_from_poll", 95);  // TODO PPC port
    return start;
  }

  // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
  //
  // The code is implemented (ported from SPARC) because we believe it benefits JVM98; however,
  // tracing (-XX:+TraceOptimizeFill) shows that the intrinsic replacement doesn't happen at all!
  //
  // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
  // for turning on loop predication optimization, and hence the behavior of "array range check"
  // and "loop invariant check" could be influenced, which potentially boosted JVM98.
  //
  // Generate stub for disjoint short fill. If "aligned" is true, the
  // "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   to:    R3_ARG1
  //   value: R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_fill(BasicType t, bool aligned, const char* name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    const Register to    = R3_ARG1;   // destination array address
    const Register value = R4_ARG2;   // fill value
    const Register count = R5_ARG3;   // elements count
    const Register temp  = R6_ARG4;   // temp register

    //assert_clean_int(count, O3);    // Make sure 'count' is clean int.

    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;

    int shift = -1;
    switch (t) {
      case T_BYTE:
        shift = 2;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        break;
      case T_SHORT:
        shift = 1;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        break;
      case T_INT:
        shift = 0;
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_4_bytes);
        break;
      default: ShouldNotReachHere();
    }
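
    // Worked example for T_BYTE with fill value 0xAB: the rldimi sequence above
    // makes the low 16 bits 0xABAB and the low 32 bits 0xABABABAB; the
    // 32 bit -> 64 bit clone further below replicates this into all 64 bits,
    // so each 8-byte store writes eight copies of the fill byte.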

    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
      // Align source address at 4 bytes address boundary.
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays.
        __ andi_(temp, to, 1);
        __ beq(CCR0, L_skip_align1);
        __ stb(value, 0, to);
        __ addi(to, to, 1);
        __ addi(count, count, -1);
        __ bind(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays.
      __ andi_(temp, to, 2);
      __ beq(CCR0, L_skip_align2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ addi(count, count, -(1 << (shift - 1)));
      __ bind(L_skip_align2);
    }

    if (!aligned) {
      // Align to 8 bytes, we know we are 4 byte aligned to start.
      __ andi_(temp, to, 7);
      __ beq(CCR0, L_fill_32_bytes);
      __ stw(value, 0, to);
      __ addi(to, to, 4);
      __ addi(count, count, -(1 << shift));
      __ bind(L_fill_32_bytes);
    }

    __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
    // Clone bytes int->long as above.
    __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit

    Label L_check_fill_8_bytes;
    // Fill 32-byte chunks.
    __ subf_(count, temp, count);
    __ blt(CCR0, L_check_fill_8_bytes);

    Label L_fill_32_bytes_loop;
    __ align(32);
    __ bind(L_fill_32_bytes_loop);

    __ std(value, 0, to);
    __ std(value, 8, to);
    __ subf_(count, temp, count);           // Update count.
    __ std(value, 16, to);
    __ std(value, 24, to);

    __ addi(to, to, 32);
    __ bge(CCR0, L_fill_32_bytes_loop);

    __ bind(L_check_fill_8_bytes);
    __ add_(count, temp, count);
    __ beq(CCR0, L_exit);
    __ addic_(count, count, -(2 << shift));
    __ blt(CCR0, L_fill_4_bytes);

    //
    // Length is too short, just fill 8 bytes at a time.
    //
    Label L_fill_8_bytes_loop;
    __ bind(L_fill_8_bytes_loop);
    __ std(value, 0, to);
    __ addic_(count, count, -(2 << shift));
    __ addi(to, to, 8);
    __ bge(CCR0, L_fill_8_bytes_loop);

    // Fill trailing 4 bytes.
    __ bind(L_fill_4_bytes);
    __ andi_(temp, count, 1<<shift);
    __ beq(CCR0, L_fill_2_bytes);

    __ stw(value, 0, to);
    if (t == T_BYTE || t == T_SHORT) {
      __ addi(to, to, 4);
      // Fill trailing 2 bytes.
      __ bind(L_fill_2_bytes);
      __ andi_(temp, count, 1<<(shift-1));
      __ beq(CCR0, L_fill_byte);
      __ sth(value, 0, to);
      if (t == T_BYTE) {
        __ addi(to, to, 2);
        // Fill trailing byte.
        __ bind(L_fill_byte);
        __ andi_(count, count, 1);
        __ beq(CCR0, L_exit);
        __ stb(value, 0, to);
      } else {
        __ bind(L_fill_byte);
      }
    } else {
      __ bind(L_fill_2_bytes);
    }
    __ bind(L_exit);
    __ blr();

    // Handle copies less than 8 bytes. Int is handled elsewhere.
    if (t == T_BYTE) {
      __ bind(L_fill_elements);
      Label L_fill_2, L_fill_4;
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ stb(value, 0, to);
      __ addi(to, to, 1);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_fill_4);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ addi(to, to, 2);
      __ bind(L_fill_4);
      __ andi_(temp, count, 4);
      __ beq(CCR0, L_exit);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ stb(value, 2, to);
      __ stb(value, 3, to);
      __ blr();
    }

    if (t == T_SHORT) {
      Label L_fill_2;
      __ bind(L_fill_elements);
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_exit);
      __ sth(value, 0, to);
      __ sth(value, 2, to);
      __ blr();
    }
    return start;
  }


  // Generate overlap test for array copy stubs.
  //
  // Input:
  //   R3_ARG1    -  from
  //   R4_ARG2    -  to
  //   R5_ARG3    -  element count
  //
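  // Branches to no_overlap_target if a forward copy is safe, otherwise falls
  // through to the backward-copy code the caller generates next. The overlap
  // condition tested below is, roughly:
  //   (from < to) && ((size_t)(to - from) < ((size_t)count << log2_elem_size))
  //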
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;

    Label l_overlap;
#ifdef ASSERT
    __ srdi_(tmp2, R5_ARG3, 31);
    __ asm_assert_eq("missing zero extend", 0xAFFE);
#endif

    __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
    __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
    __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
    __ cmpld(CCR1, tmp1, tmp2);
    __ crand(/*CCR0 lt*/0, /*CCR1 lt*/4+0, /*CCR0 lt*/0);
    __ blt(CCR0, l_overlap); // Src before dst and distance smaller than size.

    // need to copy forwards
    if (__ is_within_range_of_b(no_overlap_target, __ pc())) {
      __ b(no_overlap_target);
    } else {
      __ load_const(tmp1, no_overlap_target, tmp2);
      __ mtctr(tmp1);
      __ bctr();
    }

    __ bind(l_overlap);
    // need to copy backwards
  }

  // The guideline in the implementations of generate_disjoint_xxx_copy
  // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
  // single instructions, but to avoid alignment interrupts (see subsequent
  // comment). Furthermore, we try to minimize misaligned access, even
  // though they cause no alignment interrupt.
  //
  // In Big-Endian mode, the PowerPC architecture requires implementations to
  // handle automatically misaligned integer halfword and word accesses,
  // word-aligned integer doubleword accesses, and word-aligned floating-point
  // accesses. Other accesses may or may not generate an Alignment interrupt
  // depending on the implementation.
  // Alignment interrupt handling may require on the order of hundreds of cycles,
  // so every effort should be made to avoid misaligned memory values.
  //
  //
  // Generate stub for disjoint byte copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  address generate_disjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;


    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
    // Don't try anything fancy if arrays don't have many elements.
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 17);
    __ ble(CCR0, l_6); // copy 4 at a time

    if (!aligned) {
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp1, 3);
      __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.

      // Copy elements if necessary to align to 4 bytes.
      __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
      __ andi_(tmp1, tmp1, 3);
      __ beq(CCR0, l_2);

      __ subf(R5_ARG3, tmp1, R5_ARG3);
      __ bind(l_9);
      __ lbz(tmp2, 0, R3_ARG1);
      __ addic_(tmp1, tmp1, -1);
      __ stb(tmp2, 0, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 1);
      __ addi(R4_ARG2, R4_ARG2, 1);
      __ bne(CCR0, l_9);

      __ bind(l_2);
    }

    // copy 8 elements at a time
    __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
    __ andi_(tmp1, tmp2, 7);
    __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8

    // copy a 2-element word if necessary to align to 8 bytes
    __ andi_(R0, R3_ARG1, 7);
    __ beq(CCR0, l_7);

    __ lwzx(tmp2, R3_ARG1, tmp3);
    __ addi(R5_ARG3, R5_ARG3, -4);
    __ stwx(tmp2, R4_ARG2, tmp3);
    { // FasterArrayCopy
      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }
    __ bind(l_7);

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 31);
      __ ble(CCR0, l_6); // copy 4 at a time if less than 32 elements remain

      __ srdi(tmp1, R5_ARG3, 5);
      __ andi_(R5_ARG3, R5_ARG3, 31);
      __ mtctr(tmp1);

      __ bind(l_8);
      // Use unrolled version for mass copying (copy 32 elements at a time).
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ ld(tmp1, 0, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp4, 24, R3_ARG1);
      __ std(tmp1, 0, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp4, 24, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 32);
      __ addi(R4_ARG2, R4_ARG2, 32);
      __ bdnz(l_8);
    }

    __ bind(l_6);

    // copy 4 elements at a time
    __ cmpwi(CCR0, R5_ARG3, 4);
    __ blt(CCR0, l_1);
    __ srdi(tmp1, R5_ARG3, 2);
    __ mtctr(tmp1); // is > 0
    __ andi_(R5_ARG3, R5_ARG3, 3);

    { // FasterArrayCopy
      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);
      __ bind(l_3);
      __ lwzu(tmp2, 4, R3_ARG1);
      __ stwu(tmp2, 4, R4_ARG2);
      __ bdnz(l_3);
      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }

    // do single element copy
    __ bind(l_1);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_4);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -1);
      __ addi(R4_ARG2, R4_ARG2, -1);

      __ bind(l_5);
      __ lbzu(tmp2, 1, R3_ARG1);
      __ stbu(tmp2, 1, R4_ARG2);
      __ bdnz(l_5);
    }

    __ bind(l_4);
    __ blr();

    return start;
  }

  // Generate stub for conjoint byte copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  address generate_conjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;

#if defined(ABI_ELFv2)
    address nooverlap_target = aligned ?
      StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
      StubRoutines::jbyte_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
#endif

    array_overlap_test(nooverlap_target, 0);
    // Do reverse copy. We assume the case of actual overlap is rare enough
    // that we don't have to optimize it.
    Label l_1, l_2;

    __ b(l_2);
    __ bind(l_1);
    __ stbx(tmp1, R4_ARG2, R5_ARG3);
    __ bind(l_2);
    __ addic_(R5_ARG3, R5_ARG3, -1);
    __ lbzx(tmp1, R3_ARG1, R5_ARG3);
    __ bge(CCR0, l_1);

    __ blr();

    return start;
  }

  // Generate stub for disjoint short copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //  elm.count: R5_ARG3 treated as signed
  //
  // Strategy for aligned==true:
  //
  //  If length <= 9:
  //     1. copy 2 elements at a time (l_6)
  //     2. copy last element if original element count was odd (l_1)
  //
  //  If length > 9:
  //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
  //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
  //     3. copy last element if one was left in step 2. (l_1)
  //
  //
  // Strategy for aligned==false:
  //
  //  If length <= 9: same as aligned==true case, but NOTE: load/stores
  //                  can be unaligned (see comment below)
  //
  //  If length > 9:
  //     1. continue with step 6. if the alignment of from and to mod 4
  //        is different.
  //     2. align from and to to 4 bytes by copying 1 element if necessary
  //     3. at l_2 from and to are 4 byte aligned; continue with
  //        5. if they cannot be aligned to 8 bytes because they have
  //        got different alignment mod 8.
  //     4. at this point we know that both, from and to, have the same
  //        alignment mod 8, now copy one element if necessary to get
  //        8 byte alignment of from and to.
  //     5. copy 4 elements at a time until less than 4 elements are
  //        left; depending on step 3. all load/stores are aligned or
  //        either all loads or all stores are unaligned.
  //     6. copy 2 elements at a time until less than 2 elements are
  //        left (l_6); arriving here from step 1., there is a chance
  //        that all accesses are unaligned.
  //     7. copy last element if one was left in step 6. (l_1)
  //
  //  There are unaligned data accesses using integer load/store
  //  instructions in this stub. POWER allows such accesses.
  //
  //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
  //  Chapter 2: Effect of Operand Placement on Performance) unaligned
  //  integer load/stores have good performance. Only unaligned
  //  floating point load/stores can have poor performance.
  //
  //  TODO:
  //
  //  1. check if aligning the backbranch target of loops is beneficial
  //
  address generate_disjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;

    address start = __ function_entry();

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
    // don't try anything fancy if arrays don't have many elements
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 9);
    __ ble(CCR0, l_6); // copy 2 at a time

    if (!aligned) {
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp1, 3);
      __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy

      // At this point it is guaranteed that both, from and to have the same alignment mod 4.

      // Copy 1 element if necessary to align to 4 bytes.
      __ andi_(tmp1, R3_ARG1, 3);
      __ beq(CCR0, l_2);

      __ lhz(tmp2, 0, R3_ARG1);
      __ addi(R3_ARG1, R3_ARG1, 2);
      __ sth(tmp2, 0, R4_ARG2);
      __ addi(R4_ARG2, R4_ARG2, 2);
      __ addi(R5_ARG3, R5_ARG3, -1);
      __ bind(l_2);

      // At this point the positions of both, from and to, are at least 4 byte aligned.

      // Copy 4 elements at a time.
      // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
      __ xorr(tmp2, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp2, 7);
      __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned

      // Copy a 2-element word if necessary to align to 8 bytes.
      __ andi_(R0, R3_ARG1, 7);
      __ beq(CCR0, l_7);

      __ lwzx(tmp2, R3_ARG1, tmp3);
      __ addi(R5_ARG3, R5_ARG3, -2);
      __ stwx(tmp2, R4_ARG2, tmp3);
      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }
    }

    __ bind(l_7);

    // Copy 4 elements at a time; either the loads or the stores can
    // be unaligned if aligned == false.

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 15);
      __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain

      __ srdi(tmp1, R5_ARG3, 4);
      __ andi_(R5_ARG3, R5_ARG3, 15);
      __ mtctr(tmp1);

      __ bind(l_8);
      // Use unrolled version for mass copying (copy 16 elements at a time).
1421       // Load feeding store gets zero latency on Power6, however not on Power5.
1422       // Therefore, the following sequence is made for the good of both.
1423       __ ld(tmp1, 0, R3_ARG1);
1424       __ ld(tmp2, 8, R3_ARG1);
1425       __ ld(tmp3, 16, R3_ARG1);
1426       __ ld(tmp4, 24, R3_ARG1);
1427       __ std(tmp1, 0, R4_ARG2);
1428       __ std(tmp2, 8, R4_ARG2);
1429       __ std(tmp3, 16, R4_ARG2);
1430       __ std(tmp4, 24, R4_ARG2);
1431       __ addi(R3_ARG1, R3_ARG1, 32);
1432       __ addi(R4_ARG2, R4_ARG2, 32);
1433       __ bdnz(l_8);
1434     }
1435     __ bind(l_6);
1436 
1437     // copy 2 elements at a time
1438     { // FasterArrayCopy
1439       __ cmpwi(CCR0, R5_ARG3, 2);
1440       __ blt(CCR0, l_1);
1441       __ srdi(tmp1, R5_ARG3, 1);
1442       __ andi_(R5_ARG3, R5_ARG3, 1);
1443 
1444       __ addi(R3_ARG1, R3_ARG1, -4);
1445       __ addi(R4_ARG2, R4_ARG2, -4);
1446       __ mtctr(tmp1);
1447 
1448       __ bind(l_3);
1449       __ lwzu(tmp2, 4, R3_ARG1);
1450       __ stwu(tmp2, 4, R4_ARG2);
1451       __ bdnz(l_3);
1452 
1453       __ addi(R3_ARG1, R3_ARG1, 4);
1454       __ addi(R4_ARG2, R4_ARG2, 4);
1455     }
1456 
1457     // do single element copy
1458     __ bind(l_1);
1459     __ cmpwi(CCR0, R5_ARG3, 0);
1460     __ beq(CCR0, l_4);
1461 
1462     { // FasterArrayCopy
1463       __ mtctr(R5_ARG3);
1464       __ addi(R3_ARG1, R3_ARG1, -2);
1465       __ addi(R4_ARG2, R4_ARG2, -2);
1466 
1467       __ bind(l_5);
1468       __ lhzu(tmp2, 2, R3_ARG1);
1469       __ sthu(tmp2, 2, R4_ARG2);
1470       __ bdnz(l_5);
1471     }
1472     __ bind(l_4);
1473     __ blr();
1474 
1475     return start;
1476   }
1477 
1478   // Generate stub for conjoint short copy.  If "aligned" is true, the
1479   // "from" and "to" addresses are assumed to be heapword aligned.
1480   //
1481   // Arguments for generated stub:
1482   //      from:  R3_ARG1
1483   //      to:    R4_ARG2
1484   //      count: R5_ARG3 treated as signed
1485   //
1486   address generate_conjoint_short_copy(bool aligned, const char * name) {
1487     StubCodeMark mark(this, "StubRoutines", name);
1488     address start = __ function_entry();
1489 
1490     Register tmp1 = R6_ARG4;
1491     Register tmp2 = R7_ARG5;
1492     Register tmp3 = R8_ARG6;
1493 
1494 #if defined(ABI_ELFv2)
1495     address nooverlap_target = aligned ?
1496         StubRoutines::arrayof_jshort_disjoint_arraycopy() :
1497         StubRoutines::jshort_disjoint_arraycopy();
1498 #else
1499     address nooverlap_target = aligned ?
1500         ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
1501         ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
1502 #endif
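
         // Note on the #else branch: under the 64-bit ELFv1 ABI the address of a
         // function is the address of its function descriptor rather than of its
         // first instruction, so the disjoint stub entry has to be unwrapped with
         // ->entry().  Roughly (illustrative sketch only; see the PPC port's
         // FunctionDescriptor for the real definition):
         //
         //   struct FunctionDescriptor_sketch {
         //     address entry;  // code entry point -- what we branch to
         //     address toc;    // TOC base for the callee
         //     address env;    // environment pointer (unused by C/C++)
         //   };
         //
         // Under ABI_ELFv2 the stub address already is the code entry point.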
1503 
1504     array_overlap_test(nooverlap_target, 1);
1505 
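         // Copy backwards, one element at a time: tmp1 starts at count*2 (a byte
         // offset just past the last element), is decremented by 2 before each
         // load, and the loop runs while it is still >= 0.  Copying from the
         // highest address downwards is what makes this safe for overlapping
         // arrays (the non-overlapping case was already dispatched above).
         // Note that the lhzx is issued before the termination test, so one
         // extra halfword just below the copied region is loaded and discarded.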
1506     Label l_1, l_2;
1507     __ sldi(tmp1, R5_ARG3, 1);
1508     __ b(l_2);
1509     __ bind(l_1);
1510     __ sthx(tmp2, R4_ARG2, tmp1);
1511     __ bind(l_2);
1512     __ addic_(tmp1, tmp1, -2);
1513     __ lhzx(tmp2, R3_ARG1, tmp1);
1514     __ bge(CCR0, l_1);
1515 
1516     __ blr();
1517 
1518     return start;
1519   }
1520 
1521   // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
1522   // is true, the "from" and "to" addresses are assumed to be heapword aligned.
1523   //
1524   // Arguments:
1525   //      from:  R3_ARG1
1526   //      to:    R4_ARG2
1527   //      count: R5_ARG3 treated as signed
1528   //
1529   void generate_disjoint_int_copy_core(bool aligned) {
1530     Register tmp1 = R6_ARG4;
1531     Register tmp2 = R7_ARG5;
1532     Register tmp3 = R8_ARG6;
1533     Register tmp4 = R0;
1534 
1535     Label l_1, l_2, l_3, l_4, l_5, l_6;
1536     // for short arrays, just do single element copy
1537     __ li(tmp3, 0);
1538     __ cmpwi(CCR0, R5_ARG3, 5);
1539     __ ble(CCR0, l_2);
1540 
1541     if (!aligned) {
1542       // Check if the arrays have the same alignment mod 8.
1543       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1544       __ andi_(R0, tmp1, 7);
1545       // Not the same alignment, but ld and std only need 4 byte alignment.
1546       __ bne(CCR0, l_4); // alignments differ mod 8 -> go straight to the ld/std loop (2 elements per access)
1547 
1548       // Copy 1 element to align 'to' and 'from' on an 8 byte boundary.
1549       __ andi_(R0, R3_ARG1, 7);
1550       __ beq(CCR0, l_4);
1551 
1552       __ lwzx(tmp2, R3_ARG1, tmp3);
1553       __ addi(R5_ARG3, R5_ARG3, -1);
1554       __ stwx(tmp2, R4_ARG2, tmp3);
1555       { // FasterArrayCopy
1556         __ addi(R3_ARG1, R3_ARG1, 4);
1557         __ addi(R4_ARG2, R4_ARG2, 4);
1558       }
1559       __ bind(l_4);
1560     }
1561 
1562     { // FasterArrayCopy
1563       __ cmpwi(CCR0, R5_ARG3, 7);
1564       __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1565 
1566       __ srdi(tmp1, R5_ARG3, 3);
1567       __ andi_(R5_ARG3, R5_ARG3, 7);
1568       __ mtctr(tmp1);
1569 
1570       __ bind(l_6);
1571       // Use unrolled version for mass copying (copy 8 elements at a time).
1572       // Load feeding store gets zero latency on Power6, however not on Power5.
1573       // Therefore, the following sequence is made for the good of both.
1574       __ ld(tmp1, 0, R3_ARG1);
1575       __ ld(tmp2, 8, R3_ARG1);
1576       __ ld(tmp3, 16, R3_ARG1);
1577       __ ld(tmp4, 24, R3_ARG1);
1578       __ std(tmp1, 0, R4_ARG2);
1579       __ std(tmp2, 8, R4_ARG2);
1580       __ std(tmp3, 16, R4_ARG2);
1581       __ std(tmp4, 24, R4_ARG2);
1582       __ addi(R3_ARG1, R3_ARG1, 32);
1583       __ addi(R4_ARG2, R4_ARG2, 32);
1584       __ bdnz(l_6);
1585     }
1586 
1587     // copy 1 element at a time
1588     __ bind(l_2);
1589     __ cmpwi(CCR0, R5_ARG3, 0);
1590     __ beq(CCR0, l_1);
1591 
1592     { // FasterArrayCopy
1593       __ mtctr(R5_ARG3);
1594       __ addi(R3_ARG1, R3_ARG1, -4);
1595       __ addi(R4_ARG2, R4_ARG2, -4);
1596 
1597       __ bind(l_3);
1598       __ lwzu(tmp2, 4, R3_ARG1);
1599       __ stwu(tmp2, 4, R4_ARG2);
1600       __ bdnz(l_3);
1601     }
1602 
1603     __ bind(l_1);
1604     return;
1605   }
1606 
1607   // Generate stub for disjoint int copy.  If "aligned" is true, the
1608   // "from" and "to" addresses are assumed to be heapword aligned.
1609   //
1610   // Arguments for generated stub:
1611   //      from:  R3_ARG1
1612   //      to:    R4_ARG2
1613   //      count: R5_ARG3 treated as signed
1614   //
1615   address generate_disjoint_int_copy(bool aligned, const char * name) {
1616     StubCodeMark mark(this, "StubRoutines", name);
1617     address start = __ function_entry();
1618     generate_disjoint_int_copy_core(aligned);
1619     __ blr();
1620     return start;
1621   }
1622 
1623   // Generate core code for conjoint int copy (and oop copy on
1624   // 32-bit).  If "aligned" is true, the "from" and "to" addresses
1625   // are assumed to be heapword aligned.
1626   //
1627   // Arguments:
1628   //      from:  R3_ARG1
1629   //      to:    R4_ARG2
1630   //      count: R5_ARG3 treated as signed
1631   //
1632   void generate_conjoint_int_copy_core(bool aligned) {
1633     // Do reverse copy.  We assume the case of actual overlap is rare enough
1634     // that we don't have to optimize it.
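         //
         // Roughly, the code below does the following (C-level sketch only; the
         // jlong casts just mirror the ld/std pairs emitted by the real code):
         //
         //   from += count; to += count;                   // start past the end
         //   while (count >= 8) {                          // 32 bytes per step
         //     from -= 8; to -= 8; count -= 8;
         //     jlong a = ((jlong*)from)[0], b = ((jlong*)from)[1];
         //     jlong c = ((jlong*)from)[2], d = ((jlong*)from)[3];  // load everything first,
         //     ((jlong*)to)[0] = a; ((jlong*)to)[1] = b;            // then store, so a partial
         //     ((jlong*)to)[2] = c; ((jlong*)to)[3] = d;            // overlap cannot clobber
         //   }                                                      // unread source data
         //   while (count > 0) { count--; *--to = *--from; }        // leftover elements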
1635 
1636     Label l_1, l_2, l_3, l_4, l_5, l_6;
1637 
1638     Register tmp1 = R6_ARG4;
1639     Register tmp2 = R7_ARG5;
1640     Register tmp3 = R8_ARG6;
1641     Register tmp4 = R0;
1642 
1643     { // FasterArrayCopy
1644       __ cmpwi(CCR0, R5_ARG3, 0);
1645       __ beq(CCR0, l_6);
1646 
1647       __ sldi(R5_ARG3, R5_ARG3, 2);
1648       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1649       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1650       __ srdi(R5_ARG3, R5_ARG3, 2);
1651 
1652       __ cmpwi(CCR0, R5_ARG3, 7);
1653       __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1654 
1655       __ srdi(tmp1, R5_ARG3, 3);
1656       __ andi(R5_ARG3, R5_ARG3, 7);
1657       __ mtctr(tmp1);
1658 
1659       __ bind(l_4);
1660       // Use unrolled version for mass copying (copy 8 elements at a time).
1661       // Load feeding store gets zero latency on Power6, however not on Power5.
1662       // Therefore, the following sequence is made for the good of both.
1663       __ addi(R3_ARG1, R3_ARG1, -32);
1664       __ addi(R4_ARG2, R4_ARG2, -32);
1665       __ ld(tmp4, 24, R3_ARG1);
1666       __ ld(tmp3, 16, R3_ARG1);
1667       __ ld(tmp2, 8, R3_ARG1);
1668       __ ld(tmp1, 0, R3_ARG1);
1669       __ std(tmp4, 24, R4_ARG2);
1670       __ std(tmp3, 16, R4_ARG2);
1671       __ std(tmp2, 8, R4_ARG2);
1672       __ std(tmp1, 0, R4_ARG2);
1673       __ bdnz(l_4);
1674 
1675       __ cmpwi(CCR0, R5_ARG3, 0);
1676       __ beq(CCR0, l_6);
1677 
1678       __ bind(l_5);
1679       __ mtctr(R5_ARG3);
1680       __ bind(l_3);
1681       __ lwz(R0, -4, R3_ARG1);
1682       __ stw(R0, -4, R4_ARG2);
1683       __ addi(R3_ARG1, R3_ARG1, -4);
1684       __ addi(R4_ARG2, R4_ARG2, -4);
1685       __ bdnz(l_3);
1686 
1687       __ bind(l_6);
1688     }
1689   }
1690 
1691   // Generate stub for conjoint int copy.  If "aligned" is true, the
1692   // "from" and "to" addresses are assumed to be heapword aligned.
1693   //
1694   // Arguments for generated stub:
1695   //      from:  R3_ARG1
1696   //      to:    R4_ARG2
1697   //      count: R5_ARG3 treated as signed
1698   //
1699   address generate_conjoint_int_copy(bool aligned, const char * name) {
1700     StubCodeMark mark(this, "StubRoutines", name);
1701     address start = __ function_entry();
1702 
1703 #if defined(ABI_ELFv2)
1704     address nooverlap_target = aligned ?
1705       StubRoutines::arrayof_jint_disjoint_arraycopy() :
1706       StubRoutines::jint_disjoint_arraycopy();
1707 #else
1708     address nooverlap_target = aligned ?
1709       ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
1710       ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
1711 #endif
1712 
1713     array_overlap_test(nooverlap_target, 2);
1714 
1715     generate_conjoint_int_copy_core(aligned);
1716 
1717     __ blr();
1718 
1719     return start;
1720   }
1721 
1722   // Generate core code for disjoint long copy (and oop copy on
1723   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1724   // are assumed to be heapword aligned.
1725   //
1726   // Arguments:
1727   //      from:  R3_ARG1
1728   //      to:    R4_ARG2
1729   //      count: R5_ARG3 treated as signed
1730   //
1731   void generate_disjoint_long_copy_core(bool aligned) {
1732     Register tmp1 = R6_ARG4;
1733     Register tmp2 = R7_ARG5;
1734     Register tmp3 = R8_ARG6;
1735     Register tmp4 = R0;
1736 
1737     Label l_1, l_2, l_3, l_4;
1738 
1739     { // FasterArrayCopy
1740       __ cmpwi(CCR0, R5_ARG3, 3);
1741       __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1742 
1743       __ srdi(tmp1, R5_ARG3, 2);
1744       __ andi_(R5_ARG3, R5_ARG3, 3);
1745       __ mtctr(tmp1);
1746 
1747       __ bind(l_4);
1748       // Use unrolled version for mass copying (copy 4 elements at a time).
1749       // Load feeding store gets zero latency on Power6, however not on Power5.
1750       // Therefore, the following sequence is made for the good of both.
1751       __ ld(tmp1, 0, R3_ARG1);
1752       __ ld(tmp2, 8, R3_ARG1);
1753       __ ld(tmp3, 16, R3_ARG1);
1754       __ ld(tmp4, 24, R3_ARG1);
1755       __ std(tmp1, 0, R4_ARG2);
1756       __ std(tmp2, 8, R4_ARG2);
1757       __ std(tmp3, 16, R4_ARG2);
1758       __ std(tmp4, 24, R4_ARG2);
1759       __ addi(R3_ARG1, R3_ARG1, 32);
1760       __ addi(R4_ARG2, R4_ARG2, 32);
1761       __ bdnz(l_4);
1762     }
1763 
1764     // copy 1 element at a time
1765     __ bind(l_3);
1766     __ cmpwi(CCR0, R5_ARG3, 0);
1767     __ beq(CCR0, l_1);
1768 
1769     { // FasterArrayCopy
1770       __ mtctr(R5_ARG3);
1771       __ addi(R3_ARG1, R3_ARG1, -8);
1772       __ addi(R4_ARG2, R4_ARG2, -8);
1773 
1774       __ bind(l_2);
1775       __ ldu(R0, 8, R3_ARG1);
1776       __ stdu(R0, 8, R4_ARG2);
1777       __ bdnz(l_2);
1778 
1779     }
1780     __ bind(l_1);
1781   }
1782 
1783   // Generate stub for disjoint long copy.  If "aligned" is true, the
1784   // "from" and "to" addresses are assumed to be heapword aligned.
1785   //
1786   // Arguments for generated stub:
1787   //      from:  R3_ARG1
1788   //      to:    R4_ARG2
1789   //      count: R5_ARG3 treated as signed
1790   //
1791   address generate_disjoint_long_copy(bool aligned, const char * name) {
1792     StubCodeMark mark(this, "StubRoutines", name);
1793     address start = __ function_entry();
1794     generate_disjoint_long_copy_core(aligned);
1795     __ blr();
1796 
1797     return start;
1798   }
1799 
1800   // Generate core code for conjoint long copy (and oop copy on
1801   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1802   // are assumed to be heapword aligned.
1803   //
1804   // Arguments:
1805   //      from:  R3_ARG1
1806   //      to:    R4_ARG2
1807   //      count: R5_ARG3 treated as signed
1808   //
1809   void generate_conjoint_long_copy_core(bool aligned) {
1810     Register tmp1 = R6_ARG4;
1811     Register tmp2 = R7_ARG5;
1812     Register tmp3 = R8_ARG6;
1813     Register tmp4 = R0;
1814 
1815     Label l_1, l_2, l_3, l_4, l_5;
1816 
1817     __ cmpwi(CCR0, R5_ARG3, 0);
1818     __ beq(CCR0, l_1);
1819 
1820     { // FasterArrayCopy
1821       __ sldi(R5_ARG3, R5_ARG3, 3);
1822       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1823       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1824       __ srdi(R5_ARG3, R5_ARG3, 3);
1825 
1826       __ cmpwi(CCR0, R5_ARG3, 3);
1827       __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
1828 
1829       __ srdi(tmp1, R5_ARG3, 2);
1830       __ andi(R5_ARG3, R5_ARG3, 3);
1831       __ mtctr(tmp1);
1832 
1833       __ bind(l_4);
1834       // Use unrolled version for mass copying (copy 4 elements at a time).
1835       // Load feeding store gets zero latency on Power6, however not on Power5.
1836       // Therefore, the following sequence is made for the good of both.
1837       __ addi(R3_ARG1, R3_ARG1, -32);
1838       __ addi(R4_ARG2, R4_ARG2, -32);
1839       __ ld(tmp4, 24, R3_ARG1);
1840       __ ld(tmp3, 16, R3_ARG1);
1841       __ ld(tmp2, 8, R3_ARG1);
1842       __ ld(tmp1, 0, R3_ARG1);
1843       __ std(tmp4, 24, R4_ARG2);
1844       __ std(tmp3, 16, R4_ARG2);
1845       __ std(tmp2, 8, R4_ARG2);
1846       __ std(tmp1, 0, R4_ARG2);
1847       __ bdnz(l_4);
1848 
1849       __ cmpwi(CCR0, R5_ARG3, 0);
1850       __ beq(CCR0, l_1);
1851 
1852       __ bind(l_5);
1853       __ mtctr(R5_ARG3);
1854       __ bind(l_3);
1855       __ ld(R0, -8, R3_ARG1);
1856       __ std(R0, -8, R4_ARG2);
1857       __ addi(R3_ARG1, R3_ARG1, -8);
1858       __ addi(R4_ARG2, R4_ARG2, -8);
1859       __ bdnz(l_3);
1860 
1861     }
1862     __ bind(l_1);
1863   }
1864 
1865   // Generate stub for conjoint long copy.  If "aligned" is true, the
1866   // "from" and "to" addresses are assumed to be heapword aligned.
1867   //
1868   // Arguments for generated stub:
1869   //      from:  R3_ARG1
1870   //      to:    R4_ARG2
1871   //      count: R5_ARG3 treated as signed
1872   //
1873   address generate_conjoint_long_copy(bool aligned, const char * name) {
1874     StubCodeMark mark(this, "StubRoutines", name);
1875     address start = __ function_entry();
1876 
1877 #if defined(ABI_ELFv2)
1878     address nooverlap_target = aligned ?
1879       StubRoutines::arrayof_jlong_disjoint_arraycopy() :
1880       StubRoutines::jlong_disjoint_arraycopy();
1881 #else
1882     address nooverlap_target = aligned ?
1883       ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
1884       ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
1885 #endif
1886 
1887     array_overlap_test(nooverlap_target, 3);
1888     generate_conjoint_long_copy_core(aligned);
1889 
1890     __ blr();
1891 
1892     return start;
1893   }
1894 
1895   // Generate stub for conjoint oop copy.  If "aligned" is true, the
1896   // "from" and "to" addresses are assumed to be heapword aligned.
1897   //
1898   // Arguments for generated stub:
1899   //      from:  R3_ARG1
1900   //      to:    R4_ARG2
1901   //      count: R5_ARG3 treated as signed
1902   //      dest_uninitialized: G1 support
1903   //
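       //  At a high level the stub below performs (illustrative outline only):
       //
       //    gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized);
       //    dispatch non-overlapping inputs to the disjoint stub (array_overlap_test);
       //    if (UseCompressedOops)
       //      copy the elements as 32-bit narrowOops (conjoint int copy core);
       //    else
       //      copy the elements as 64-bit oops (conjoint long copy core);
       //    gen_write_ref_array_post_barrier(to, count);
       //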
1904   address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1905     StubCodeMark mark(this, "StubRoutines", name);
1906 
1907     address start = __ function_entry();
1908 
1909 #if defined(ABI_ELFv2)
1910     address nooverlap_target = aligned ?
1911       StubRoutines::arrayof_oop_disjoint_arraycopy() :
1912       StubRoutines::oop_disjoint_arraycopy();
1913 #else
1914     address nooverlap_target = aligned ?
1915       ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
1916       ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
1917 #endif
1918 
1919     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1920 
1921     // Save arguments.
1922     __ mr(R9_ARG7, R4_ARG2);
1923     __ mr(R10_ARG8, R5_ARG3);
1924 
1925     if (UseCompressedOops) {
1926       array_overlap_test(nooverlap_target, 2);
1927       generate_conjoint_int_copy_core(aligned);
1928     } else {
1929       array_overlap_test(nooverlap_target, 3);
1930       generate_conjoint_long_copy_core(aligned);
1931     }
1932 
1933     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
1934     return start;
1935   }
1936 
1937   // Generate stub for disjoint oop copy.  If "aligned" is true, the
1938   // "from" and "to" addresses are assumed to be heapword aligned.
1939   //
1940   // Arguments for generated stub:
1941   //      from:  R3_ARG1
1942   //      to:    R4_ARG2
1943   //      count: R5_ARG3 treated as signed
1944   //      dest_uninitialized: G1 support
1945   //
1946   address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1947     StubCodeMark mark(this, "StubRoutines", name);
1948     address start = __ function_entry();
1949 
1950     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1951 
1952     // Save some arguments; the copy core destroys them,
1953     // but the post barrier still needs them.
1954     __ mr(R9_ARG7, R4_ARG2);
1955     __ mr(R10_ARG8, R5_ARG3);
1956 
1957     if (UseCompressedOops) {
1958       generate_disjoint_int_copy_core(aligned);
1959     } else {
1960       generate_disjoint_long_copy_core(aligned);
1961     }
1962 
1963     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
1964 
1965     return start;
1966   }
1967 
1968   void generate_arraycopy_stubs() {
1969     // Note: the disjoint stubs must be generated first, some of
1970     // the conjoint stubs use them.
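         // (The conjoint generators fetch their nooverlap_target from the
         // corresponding StubRoutines::*_disjoint_arraycopy() entry points,
         // which therefore have to be filled in first.)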
1971 
1972     // non-aligned disjoint versions
1973     StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
1974     StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
1975     StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
1976     StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
1977     StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
1978     StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
1979 
1980     // aligned disjoint versions
1981     StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
1982     StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
1983     StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
1984     StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
1985     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
1986     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
1987 
1988     // non-aligned conjoint versions
1989     StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
1990     StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
1991     StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
1992     StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
1993     StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
1994     StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
1995 
1996     // aligned conjoint versions
1997     StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
1998     StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
1999     StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
2000     StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
2001     StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
2002     StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
2003 
2004     // fill routines
2005     StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
2006     StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
2007     StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
2008     StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
2009     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2010     StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
2011   }
2012 
2013   // Safefetch stubs.
2014   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
2015     // safefetch signatures:
2016     //   int      SafeFetch32(int*      adr, int      errValue);
2017     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
2018     //
2019     // arguments:
2020     //   R3_ARG1 = adr
2021     //   R4_ARG2 = errValue
2022     //
2023     // result:
2024     //   R3_RET  = *adr or errValue
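         //
         // If the load at *fault_pc traps, the VM's signal handler redirects
         // execution to *continuation_pc, where errValue (still in R4_ARG2) is
         // returned instead.  Typical use from VM code looks roughly like this
         // (illustrative only; the real callers live elsewhere in the runtime):
         //
         //   int probe = SafeFetch32((int*) addr, -1);
         //   bool maybe_readable = (probe != -1);  // heuristic: -1 could also be a real value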
2025 
2026     StubCodeMark mark(this, "StubRoutines", name);
2027 
2028     // Entry point, pc or function descriptor.
2029     *entry = __ function_entry();
2030 
2031     // Load *adr into R4_ARG2, may fault.
2032     *fault_pc = __ pc();
2033     switch (size) {
2034       case 4:
2035         // int32_t, sign extended
2036         __ lwa(R4_ARG2, 0, R3_ARG1);
2037         break;
2038       case 8:
2039         // int64_t
2040         __ ld(R4_ARG2, 0, R3_ARG1);
2041         break;
2042       default:
2043         ShouldNotReachHere();
2044     }
2045 
2046     // return errValue or *adr
2047     *continuation_pc = __ pc();
2048     __ mr(R3_RET, R4_ARG2);
2049     __ blr();
2050   }
2051 
2052   // Initialization
2053   void generate_initial() {
2054     // Generates all stubs and initializes the entry points
2055 
2056     // Entry points that exist in all platforms.
2057     // Note: This is code that could be shared among different platforms; however, the
2058     // benefit seems to be smaller than the disadvantage of having a
2059     // much more complicated generator structure. See also comment in
2060     // stubRoutines.hpp.
2061 
2062     StubRoutines::_forward_exception_entry          = generate_forward_exception();
2063     StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
2064     StubRoutines::_catch_exception_entry            = generate_catch_exception();
2065 
2066     // Build this early so it's available for the interpreter.
2067     StubRoutines::_throw_StackOverflowError_entry   =
2068       generate_throw_exception("StackOverflowError throw_exception",
2069                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
2070   }
2071 
2072   void generate_all() {
2073     // Generates all stubs and initializes the entry points
2074 
2075     // These entry points require SharedInfo::stack0 to be set up in
2076     // non-core builds
2077     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
2078     // Handle IncompatibleClassChangeError in itable stubs.
2079     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
2080     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
2081 
2082     StubRoutines::_handler_for_unsafe_access_entry         = generate_handler_for_unsafe_access();
2083 
2084     // support for verify_oop (must happen after universe_init)
2085     StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
2086 
2087     // arraycopy stubs used by compilers
2088     generate_arraycopy_stubs();
2089 
2090     if (UseAESIntrinsics) {
2091       guarantee(!UseAESIntrinsics, "not yet implemented.");
2092     }
2093 
2094     // Safefetch stubs.
2095     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
2096                                                        &StubRoutines::_safefetch32_fault_pc,
2097                                                        &StubRoutines::_safefetch32_continuation_pc);
2098     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
2099                                                        &StubRoutines::_safefetchN_fault_pc,
2100                                                        &StubRoutines::_safefetchN_continuation_pc);
2101   }
2102 
2103  public:
2104   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2105     // replace the standard masm with a special one:
2106     _masm = new MacroAssembler(code);
2107     if (all) {
2108       generate_all();
2109     } else {
2110       generate_initial();
2111     }
2112   }
2113 };
2114 
2115 void StubGenerator_generate(CodeBuffer* code, bool all) {
2116   StubGenerator g(code, all);
2117 }