/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#include "runtime/thread.inline.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

class StubGenerator: public StubCodeGenerator {
 private:

  // Call stubs are used to call Java from C
  //
  // Arguments:
  //
  //   R3  - call wrapper address     : address
  //   R4  - result                   : intptr_t*
  //   R5  - result type              : BasicType
  //   R6  - method                   : Method
  //   R7  - frame mgr entry point    : address
  //   R8  - parameter block          : intptr_t*
  //   R9  - parameter count in words : int
  //   R10 - thread                   : Thread*
  //
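  //
  // Hedged sketch: these registers appear to correspond to the parameters of
  // the CallStub function pointer type declared in stubRoutines.hpp, roughly:
  //
  //   typedef void (*CallStub)(address   link,                // R3
  //                            intptr_t* result,              // R4
  //                            BasicType result_type,         // R5
  //                            Method*   method,              // R6
  //                            address   entry_point,         // R7
  //                            intptr_t* parameters,          // R8
  //                            int       size_of_parameters,  // R9
  //                            TRAPS);                        // R10 (thread)
  //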
  address generate_call_stub(address& return_address) {
    // Set up a new C frame, copy Java arguments, call frame manager or
    // native_entry, and process result.

    StubCodeMark mark(this, "StubRoutines", "call_stub");

    address start = __ function_entry();

    // some sanity checks
    assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
    assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
    assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
    assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
    assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");

    Register r_arg_call_wrapper_addr        = R3;
    Register r_arg_result_addr              = R4;
    Register r_arg_result_type              = R5;
    Register r_arg_method                   = R6;
    Register r_arg_entry                    = R7;
    Register r_arg_thread                   = R10;

    Register r_temp                         = R24;
    Register r_top_of_arguments_addr        = R25;
    Register r_entryframe_fp                = R26;

    {
      // Stack on entry to call_stub:
      //
      //      F1      [C_FRAME]
      //              ...

      Register r_arg_argument_addr          = R8;
      Register r_arg_argument_count         = R9;
      Register r_frame_alignment_in_bytes   = R27;
      Register r_argument_addr              = R28;
      Register r_argumentcopy_addr          = R29;
      Register r_argument_size_in_bytes     = R30;
      Register r_frame_size                 = R23;

      Label arguments_copied;

      // Save LR/CR to caller's C_FRAME.
      __ save_LR_CR(R0);

      // Zero extend arg_argument_count.
      __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);

      // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
      __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));

      // Keep copy of our frame pointer (caller's SP).
      __ mr(r_entryframe_fp, R1_SP);

      BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
      // Push ENTRY_FRAME including arguments:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...

      // calculate frame size

      // unaligned size of arguments
      __ sldi(r_argument_size_in_bytes,
                  r_arg_argument_count, Interpreter::logStackElementSize);
      // arguments alignment (max 1 slot)
      // FIXME: use round_to() here
      __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
      __ sldi(r_frame_alignment_in_bytes,
              r_frame_alignment_in_bytes, Interpreter::logStackElementSize);

      // size = unaligned size of arguments + top abi's size
      __ addi(r_frame_size, r_argument_size_in_bytes,
              frame::top_ijava_frame_abi_size);
      // size += arguments alignment
      __ add(r_frame_size,
             r_frame_size, r_frame_alignment_in_bytes);
      // size += size of call_stub locals
      __ addi(r_frame_size,
              r_frame_size, frame::entry_frame_locals_size);

      // push ENTRY_FRAME
      __ push_frame(r_frame_size, r_temp);

      // initialize call_stub locals (step 1)
      __ std(r_arg_call_wrapper_addr,
             _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
      __ std(r_arg_result_addr,
             _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ std(r_arg_result_type,
             _entry_frame_locals_neg(result_type), r_entryframe_fp);
      // we will save arguments_tos_address later


      BLOCK_COMMENT("Copy Java arguments");
      // copy Java arguments

      // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
      // FIXME: why not simply use SP+frame::top_ijava_frame_size?
      __ addi(r_top_of_arguments_addr,
              R1_SP, frame::top_ijava_frame_abi_size);
      __ add(r_top_of_arguments_addr,
             r_top_of_arguments_addr, r_frame_alignment_in_bytes);

      // any arguments to copy?
      __ cmpdi(CCR0, r_arg_argument_count, 0);
      __ beq(CCR0, arguments_copied);

      // prepare loop and copy arguments in reverse order
      {
        // init CTR with arg_argument_count
        __ mtctr(r_arg_argument_count);

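      // Worked example (hedged; the abi/locals constants are platform values):
      // for 3 incoming arguments with 8-byte stack slots, the frame holds
      // 3*8 bytes of arguments plus 8 bytes of alignment padding (odd count),
      // plus top_ijava_frame_abi_size and entry_frame_locals_size, keeping the
      // total frame size a multiple of 16 as asserted above.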
        // let r_argumentcopy_addr point to last outgoing Java argument
        __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);

        // let r_argument_addr point to last incoming java argument
        __ add(r_argument_addr,
                   r_arg_argument_addr, r_argument_size_in_bytes);
        __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);

        // now loop while CTR > 0 and copy arguments
        {
          Label next_argument;
          __ bind(next_argument);

          __ ld(r_temp, 0, r_argument_addr);
          // argument_addr--;
          __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
          __ std(r_temp, 0, r_argumentcopy_addr);
          // argumentcopy_addr++;
          __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);

          __ bdnz(next_argument);
        }
      }

      // Arguments copied, continue.
      __ bind(arguments_copied);
    }

    {
      BLOCK_COMMENT("Call frame manager or native entry.");
      // Call frame manager or native entry.
      Register r_new_arg_entry = R14;
      assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                 r_arg_method, r_arg_thread);

      __ mr(r_new_arg_entry, r_arg_entry);

      // Register state on entry to frame manager / native entry:
      //
      //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
      //   R19_method  -  Method
      //   R16_thread  -  JavaThread*

      // Tos must point to last argument - element_size.
#ifdef CC_INTERP
      const Register tos = R17_tos;
#else
      const Register tos = R15_esp;
#endif
      __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);

      // initialize call_stub locals (step 2)
      // now save tos as arguments_tos_address
      __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);

      // load argument registers for call
      __ mr(R19_method, r_arg_method);
      __ mr(R16_thread, r_arg_thread);
      assert(tos != r_arg_method, "trashed r_arg_method");
      assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");

      // Set R15_prev_state to 0 for simplifying checks in callee.
#ifdef CC_INTERP
      __ li(R15_prev_state, 0);
#else
      __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif
      // Stack on entry to frame manager / native entry:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //

      // global toc register
      __ load_const(R29, MacroAssembler::global_toc(), R11_scratch1);

      // Load narrow oop base.
      __ reinit_heapbase(R30, R11_scratch1);

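      // In C terms the copy above is roughly (hedged sketch):
      //   for (int i = 0; i < count; i++)
      //     outgoing[i] = incoming[count - 1 - i];
      // i.e. the incoming argument block is copied in reverse order into the
      // outgoing Java expression stack area.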
      // Remember the senderSP so the interpreter can pop c2i arguments off the stack
      // when called via a c2i.

      // Pass initial_caller_sp to framemanager.
      __ mr(R21_tmp1, R1_SP);

      // Do a light-weight C-call here, r_new_arg_entry holds the address
      // of the interpreter entry point (frame manager or native entry)
      // and save runtime-value of LR in return_address.
      assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
             "trashed r_new_arg_entry");
      return_address = __ call_stub(r_new_arg_entry);
    }

    {
      BLOCK_COMMENT("Returned from frame manager or native entry.");
      // Returned from frame manager or native entry.
      // Now pop frame, process result, and return to caller.

      // Stack on exit from frame manager / native entry:
      //
      //      F0      [ABI]
      //              ...
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //
      // Just pop the topmost frame ...
      //

      Label ret_is_object;
      Label ret_is_long;
      Label ret_is_float;
      Label ret_is_double;

      Register r_entryframe_fp = R30;
      Register r_lr            = R7_ARG5;
      Register r_cr            = R8_ARG6;

      // Reload some volatile registers which we've spilled before the call
      // to frame manager / native entry.
      // Access all locals via frame pointer, because we know nothing about
      // the topmost frame's size.
      __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
      assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
      __ ld(r_arg_result_addr,
            _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ ld(r_arg_result_type,
            _entry_frame_locals_neg(result_type), r_entryframe_fp);
      __ ld(r_cr, _abi(cr), r_entryframe_fp);
      __ ld(r_lr, _abi(lr), r_entryframe_fp);

      // pop frame and restore non-volatiles, LR and CR
      __ mr(R1_SP, r_entryframe_fp);
      __ mtcr(r_cr);
      __ mtlr(r_lr);

      // Store result depending on type. Everything that is not
      // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
      __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
      __ cmpwi(CCR1, r_arg_result_type, T_LONG);
      __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
      __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);

      // restore non-volatile registers
      __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));


      // Stack on exit from call_stub:
      //
      //      0       [C_FRAME]
      //              ...
      //
      //  no call_stub frames left.

      // All non-volatiles have been restored at this point!!
      assert(R3_RET == R3, "R3_RET should be R3");

      __ beq(CCR0, ret_is_object);
      __ beq(CCR1, ret_is_long);
      __ beq(CCR5, ret_is_float);
      __ beq(CCR6, ret_is_double);

      // default:
      __ stw(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_OBJECT:
      __ bind(ret_is_object);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_LONG:
      __ bind(ret_is_long);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_FLOAT:
      __ bind(ret_is_float);
      __ stfs(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_DOUBLE:
      __ bind(ret_is_double);
      __ stfd(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller
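
      // The dispatch above corresponds roughly to (hedged sketch):
      //   switch (result_type) {
      //     case T_OBJECT:
      //     case T_LONG:   *(intptr_t*)result = R3_RET;       break;
      //     case T_FLOAT:  *(jfloat*)result   = F1_RET;       break;
      //     case T_DOUBLE: *(jdouble*)result  = F1_RET;       break;
      //     default:       *(jint*)result     = (jint)R3_RET; break;
      //   }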
    }

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();

    // Registers alive
    //
    //  R16_thread
    //  R3_ARG1 - address of pending exception
    //  R4_ARG2 - return address in call stub

    const Register exception_file = R21_tmp1;
    const Register exception_line = R22_tmp2;

    __ load_const(exception_file, (void*)__FILE__);
    __ load_const(exception_line, (void*)__LINE__);

    __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    // store into `char *'
    __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
    // store into `int'
    __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    __ mtlr(R4_ARG2);
    // continue in call stub
    __ blr();

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

#if !defined(PRODUCT)
    if (VerifyOops) {
      // Get pending exception oop.
      __ ld(R3_ARG1,
                in_bytes(Thread::pending_exception_offset()),
                R16_thread);
      // Make sure that this code is only executed if there is a pending exception.
      {
        Label L;
        __ cmpdi(CCR0, R3_ARG1, 0);
        __ bne(CCR0, L);
        __ stop("StubRoutines::forward exception: no pending exception (1)");
        __ bind(L);
      }
      __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
    }
#endif

    // Save LR/CR and copy exception pc (LR) into R4_ARG2.
    __ save_LR_CR(R4_ARG2);
    __ push_frame_reg_args(0, R0);
    // Find exception handler.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                     SharedRuntime::exception_handler_for_return_address),
                    R16_thread,
                    R4_ARG2);
    // Copy handler's address.
    __ mtctr(R3_RET);
    __ pop_frame();
    __ restore_LR_CR(R0);

    // Set up the arguments for the exception handler:
    //  - R3_ARG1: exception oop
    //  - R4_ARG2: exception pc.

    // Load pending exception oop.
    __ ld(R3_ARG1,
              in_bytes(Thread::pending_exception_offset()),
              R16_thread);

    // The exception pc is the return address in the caller.
    // Must load it into R4_ARG2.
    __ mflr(R4_ARG2);

#ifdef ASSERT
    // Make sure exception is set.
    {
      Label L;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Clear the pending exception.
    __ li(R0, 0);
    __ std(R0,
               in_bytes(Thread::pending_exception_offset()),
               R16_thread);
    // Jump to exception handler.
    __ bctr();

    return start;
  }

#undef __
#define __ masm->
  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Only callee-saved registers are preserved (through the
  // normal register window / RegisterMap handling).  If the compiler
  // needs all registers to be preserved between the fault point and
  // the exception handler then it must assume responsibility for that
  // in AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  //
  // Note that we generate only this stub into a RuntimeStub, because
  // it needs to be properly traversed and ignored during GC, so we
  // change the meaning of the "__" macro within this method.
  //
  // Note: the routine set_pc_not_at_call_for_caller in
  // SharedRuntime.cpp requires that this code be generated into a
  // RuntimeStub.
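  //
  // Hedged usage example (the actual set of throw stubs is registered
  // elsewhere in this file):
  //
  //   StubRoutines::_throw_AbstractMethodError_entry =
  //     generate_throw_exception("AbstractMethodError throw_exception",
  //                              CAST_FROM_FN_PTR(address,
  //                                SharedRuntime::throw_AbstractMethodError),
  //                              false);
  //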
  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
                                   Register arg1 = noreg, Register arg2 = noreg) {
    CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
    MacroAssembler* masm = new MacroAssembler(&code);

    OopMapSet* oop_maps  = new OopMapSet();
    int frame_size_in_bytes = frame::abi_reg_args_size;
    OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

    address start = __ pc();

    __ save_LR_CR(R11_scratch1);

    // Push a frame.
    __ push_frame_reg_args(0, R11_scratch1);

    address frame_complete_pc = __ pc();

    if (restore_saved_exception_pc) {
      __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
    }

    // Note that we always have a runtime stub frame on the top of
    // stack by this point. Remember the offset of the instruction
    // whose address will be moved to R11_scratch1.
    address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);

    __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

    __ mr(R3_ARG1, R16_thread);
    if (arg1 != noreg) {
      __ mr(R4_ARG2, arg1);
    }
    if (arg2 != noreg) {
      __ mr(R5_ARG3, arg2);
    }
#if defined(ABI_ELFv2)
    __ call_c(runtime_entry, relocInfo::none);
#else
    __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif

    // Set an oopmap for the call site.
    oop_maps->add_gc_map((int)(gc_map_pc - start), map);

    __ reset_last_Java_frame();

#ifdef ASSERT
    // Make sure that this code is only executed if there is a pending
    // exception.
    {
      Label L;
      __ ld(R0,
                in_bytes(Thread::pending_exception_offset()),
                R16_thread);
      __ cmpdi(CCR0, R0, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::throw_exception: no pending exception");
      __ bind(L);
    }
#endif

    // Pop frame.
    __ pop_frame();

    __ restore_LR_CR(R11_scratch1);

    __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
    __ mtctr(R11_scratch1);
    __ bctr();

    // Create runtime stub with OopMap.
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name, &code,
                                    /*frame_complete=*/ (int)(frame_complete_pc - start),
                                    frame_size_in_bytes/wordSize,
                                    oop_maps,
                                    false);
    return stub->entry_point();
  }
#undef __
#define __ _masm->

  //  Generate G1 pre-write barrier for array.
  //
  //  Input:
  //     from     - register containing src address (only needed for spilling)
  //     to       - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  Kills:
  //     nothing
  //
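  //  Note (hedged): for G1 this is a SATB pre-barrier. Before the destination
  //  range is overwritten, the oops currently in [to, to+count) must become
  //  visible to the concurrent marker; the runtime call below arranges that,
  //  and the "is marking active?" filter skips it when marking is off.
  //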
  void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) {
    BarrierSet* const bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized.
        if (!dest_uninitialized) {
          const int spill_slots = 4 * wordSize;
          const int frame_size  = frame::abi_reg_args_size + spill_slots;
          Label filtered;

          // Is marking active?
          if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
            __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
          } else {
            guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
            __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
          }
          __ cmpdi(CCR0, Rtmp1, 0);
          __ beq(CCR0, filtered);

          __ save_LR_CR(R0);
          __ push_frame_reg_args(spill_slots, R0);
          __ std(from,  frame_size - 1 * wordSize, R1_SP);
          __ std(to,    frame_size - 2 * wordSize, R1_SP);
          __ std(count, frame_size - 3 * wordSize, R1_SP);

          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);

          __ ld(from,  frame_size - 1 * wordSize, R1_SP);
          __ ld(to,    frame_size - 2 * wordSize, R1_SP);
          __ ld(count, frame_size - 3 * wordSize, R1_SP);
          __ pop_frame();
          __ restore_LR_CR(R0);

          __ bind(filtered);
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  //  Generate CMS/G1 post-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers and R0 are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) {
    BarrierSet* const bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          if (branchToEnd) {
            __ save_LR_CR(R0);
            // We need this frame only to spill LR.
            __ push_frame_reg_args(0, R0);
            __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
            __ pop_frame();
            __ restore_LR_CR(R0);
          } else {
            // Tail call: fake call from stub caller by branching without linking.
            address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
            __ mr_if_needed(R3_ARG1, addr);
            __ mr_if_needed(R4_ARG2, count);
            __ load_const(R11, entry_point, R0);
            __ call_c_and_return_to_caller(R11);
          }
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          Label Lskip_loop, Lstore_loop;
          if (UseConcMarkSweepGC) {
            // TODO PPC port: contribute optimization / requires shared changes
            __ release();
          }

          CardTableModRefBS* const ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);

          __ sldi(count, count, LogBytesPerHeapOop);
          __ addi(count, count, -BytesPerHeapOop);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srdi(addr, addr, CardTableModRefBS::card_shift);
          __ srdi(count, count, CardTableModRefBS::card_shift);
          __ subf(count, addr, count);
          assert_different_registers(R0, addr, count, tmp);
          __ load_const(tmp, (address)ct->byte_map_base);
          __ addic_(count, count, 1);
          __ beq(CCR0, Lskip_loop);
          __ li(R0, 0);
          __ mtctr(count);
          // Byte store loop
          __ bind(Lstore_loop);
          __ stbx(R0, tmp, addr);
          __ addi(addr, addr, 1);
          __ bdnz(Lstore_loop);
          __ bind(Lskip_loop);
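
          // In C terms the loop above dirties every card spanned by the
          // written range (hedged sketch; start/last = first/last oop address):
          //   for (jbyte* p = byte_map_base + (start >> card_shift);
          //        p <= byte_map_base + (last >> card_shift); p++)
          //     *p = 0; // dirty card value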

          if (!branchToEnd) __ blr();
        }
      break;
      case BarrierSet::ModRef:
        if (!branchToEnd) __ blr();
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Support for void zero_words_aligned8(HeapWord* to, size_t count)
  //
  // Arguments:
  //   to:
  //   count:
  //
  // Destroys:
  //
  address generate_zero_words_aligned8() {
    StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");

    // Implemented as in ClearArray.
    address start = __ function_entry();

    Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
    Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
    Register tmp1_reg       = R5_ARG3;
    Register tmp2_reg       = R6_ARG4;
    Register zero_reg       = R7_ARG5;

    // Procedure for large arrays (uses data cache block zero instruction).
    Label dwloop, fast, fastloop, restloop, lastdword, done;
    int cl_size=VM_Version::get_cache_line_size(), cl_dwords=cl_size>>3, cl_dwordaddr_bits=exact_log2(cl_dwords);
    int min_dcbz=2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.

    // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
    __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
    __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
    __ load_const_optimized(zero_reg, 0L);      // Use as zero register.

    __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
    __ beq(CCR0, lastdword);                    // size <= 1
    __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
    __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
    __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000

    __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
    __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.

    __ beq(CCR0, fast);                         // already 128byte aligned
    __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
    __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)

    // Clear in first cache line dword-by-dword if not already 128byte aligned.
    __ bind(dwloop);
      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, 8);
    __ bdnz(dwloop);

    // clear 128byte blocks
    __ bind(fast);
    __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even

    __ mtctr(tmp1_reg);                         // load counter
    __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
    __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords

    __ bind(fastloop);
      __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, cl_size);
    __ bdnz(fastloop);

    //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
    __ beq(CCR0, lastdword);                    // rest<=1
    __ mtctr(tmp1_reg);                         // load counter

    // Clear rest.
    __ bind(restloop);
      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
      __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, 16);
    __ bdnz(restloop);

    __ bind(lastdword);
    __ beq(CCR1, done);
    __ std(zero_reg, 0, base_ptr_reg);
    __ bind(done);
    __ blr();                                   // return

    return start;
  }

  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
  //
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ function_entry();
    __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
    return start;
  }

#if !defined(PRODUCT)
  // Wrapper which calls oopDesc::is_oop_or_null()
  // Only called by MacroAssembler::verify_oop
  static void verify_oop_helper(const char* message, oop o) {
    if (!o->is_oop_or_null()) {
      fatal(message);
    }
    ++ StubRoutines::_verify_oop_count;
  }
#endif

  // Return address of code to be called from code generated by
  // MacroAssembler::verify_oop.
  //
  // Don't generate, rather use C++ code.
  address generate_verify_oop() {
    // this is actually a `FunctionDescriptor*'.
    address start = 0;

#if !defined(PRODUCT)
    start = CAST_FROM_FN_PTR(address, verify_oop_helper);
#endif

    return start;
  }

  // Fairer handling of safepoints for native methods.
  //
  // Generate code which reads from the polling page. This special handling is needed as the
  // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
  // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
  // to read from the safepoint polling page.
  address generate_load_from_poll() {
    StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
    address start = __ function_entry();
    __ unimplemented("StubRoutines::verify_oop", 95);  // TODO PPC port
    return start;
  }

  // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
  //
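    // Example (hedged): with a 128-byte cache line, cl_dwords = 16 and
    // cl_dwordaddr_bits = 4; the code below first clears dword-by-dword up to
    // a cache-line boundary, then issues dcbz per full cache line, and finally
    // clears the remaining dwords.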
  // The code is implemented (ported from SPARC) as we believe it benefits JVM98; however,
  // tracing (-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all!
  //
  // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
  // for turning on loop predication optimization, and hence the behavior of "array range check"
  // and "loop invariant check" could be influenced, which potentially boosted JVM98.
  //
  // Generate stub for disjoint short fill. If "aligned" is true, the
  // "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   to:    R3_ARG1
  //   value: R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_fill(BasicType t, bool aligned, const char* name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    const Register to    = R3_ARG1;   // destination array address
    const Register value = R4_ARG2;   // fill value
    const Register count = R5_ARG3;   // elements count
    const Register temp  = R6_ARG4;   // temp register

    //assert_clean_int(count, O3);    // Make sure 'count' is clean int.

    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;

    int shift = -1;
    switch (t) {
       case T_BYTE:
        shift = 2;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        break;
       case T_SHORT:
        shift = 1;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        break;
      case T_INT:
        shift = 0;
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_4_bytes);
        break;
      default: ShouldNotReachHere();
    }

    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
      // Align target address at a 4-byte boundary.
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays.
        __ andi_(temp, to, 1);
        __ beq(CCR0, L_skip_align1);
        __ stb(value, 0, to);
        __ addi(to, to, 1);
        __ addi(count, count, -1);
        __ bind(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays.
      __ andi_(temp, to, 2);
      __ beq(CCR0, L_skip_align2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ addi(count, count, -(1 << (shift - 1)));
      __ bind(L_skip_align2);
    }

    if (!aligned) {
      // Align to 8 bytes, we know we are 4 byte aligned to start.
      __ andi_(temp, to, 7);
      __ beq(CCR0, L_fill_32_bytes);
      __ stw(value, 0, to);
      __ addi(to, to, 4);
      __ addi(count, count, -(1 << shift));
      __ bind(L_fill_32_bytes);
    }

    __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
    // Clone bytes int->long as above.
    __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit

    Label L_check_fill_8_bytes;
    // Fill 32-byte chunks.
    __ subf_(count, temp, count);
    __ blt(CCR0, L_check_fill_8_bytes);

    Label L_fill_32_bytes_loop;
    __ align(32);
    __ bind(L_fill_32_bytes_loop);

    __ std(value, 0, to);
    __ std(value, 8, to);
    __ subf_(count, temp, count);           // Update count.
    __ std(value, 16, to);
    __ std(value, 24, to);

    __ addi(to, to, 32);
    __ bge(CCR0, L_fill_32_bytes_loop);

    __ bind(L_check_fill_8_bytes);
    __ add_(count, temp, count);
    __ beq(CCR0, L_exit);
    __ addic_(count, count, -(2 << shift));
    __ blt(CCR0, L_fill_4_bytes);

    //
    // Length is too short, just fill 8 bytes at a time.
    //
    Label L_fill_8_bytes_loop;
    __ bind(L_fill_8_bytes_loop);
    __ std(value, 0, to);
    __ addic_(count, count, -(2 << shift));
    __ addi(to, to, 8);
    __ bge(CCR0, L_fill_8_bytes_loop);

    // Fill trailing 4 bytes.
    __ bind(L_fill_4_bytes);
    __ andi_(temp, count, 1<<shift);
    __ beq(CCR0, L_fill_2_bytes);

    __ stw(value, 0, to);
    if (t == T_BYTE || t == T_SHORT) {
      __ addi(to, to, 4);
      // Fill trailing 2 bytes.
      __ bind(L_fill_2_bytes);
      __ andi_(temp, count, 1<<(shift-1));
      __ beq(CCR0, L_fill_byte);
      __ sth(value, 0, to);
      if (t == T_BYTE) {
        __ addi(to, to, 2);
        // Fill trailing byte.
        __ bind(L_fill_byte);
        __ andi_(count, count, 1);
        __ beq(CCR0, L_exit);
        __ stb(value, 0, to);
      } else {
        __ bind(L_fill_byte);
      }
    } else {
      __ bind(L_fill_2_bytes);
    }
    __ bind(L_exit);
    __ blr();

    // Handle copies less than 8 bytes. Int is handled elsewhere.
    if (t == T_BYTE) {
      __ bind(L_fill_elements);
      Label L_fill_2, L_fill_4;
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ stb(value, 0, to);
      __ addi(to, to, 1);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_fill_4);
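    // Example of the replication above (hedged): for T_BYTE and value 0x41 the
    // rldimi sequence produces 0x4141, then 0x41414141, and finally
    // 0x4141414141414141, so each std below stores 8 byte elements.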
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ addi(to, to, 2);
      __ bind(L_fill_4);
      __ andi_(temp, count, 4);
      __ beq(CCR0, L_exit);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ stb(value, 2, to);
      __ stb(value, 3, to);
      __ blr();
    }

    if (t == T_SHORT) {
      Label L_fill_2;
      __ bind(L_fill_elements);
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_exit);
      __ sth(value, 0, to);
      __ sth(value, 2, to);
      __ blr();
    }
    return start;
  }


  // Generate overlap test for array copy stubs.
  //
  // Input:
  //   R3_ARG1    -  from
  //   R4_ARG2    -  to
  //   R5_ARG3    -  element count
  //
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;

    Label l_overlap;
#ifdef ASSERT
    __ srdi_(tmp2, R5_ARG3, 31);
    __ asm_assert_eq("missing zero extend", 0xAFFE);
#endif

    __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
    __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
    __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
    __ cmpld(CCR1, tmp1, tmp2);
    __ crand(CCR0, Assembler::less, CCR1, Assembler::less);
    __ blt(CCR0, l_overlap); // Src before dst and distance smaller than size.

    // need to copy forwards
    if (__ is_within_range_of_b(no_overlap_target, __ pc())) {
      __ b(no_overlap_target);
    } else {
      __ load_const(tmp1, no_overlap_target, tmp2);
      __ mtctr(tmp1);
      __ bctr();
    }

    __ bind(l_overlap);
    // need to copy backwards
  }

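  // Branches to the backward-copy path if, roughly (hedged sketch),
  //   from < to && (size_t)(to - from) < ((size_t)count << log2_elem_size)
  // i.e. the destination lies above the source and the ranges overlap;
  // otherwise control transfers to no_overlap_target (forward copy).
  //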
  // The guideline in the implementations of generate_disjoint_xxx_copy
  // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
  // single instructions, but to avoid alignment interrupts (see subsequent
  // comment). Furthermore, we try to minimize misaligned accesses, even
  // though they cause no alignment interrupt.
  //
  // In Big-Endian mode, the PowerPC architecture requires implementations to
  // automatically handle misaligned integer halfword and word accesses,
  // word-aligned integer doubleword accesses, and word-aligned floating-point
  // accesses. Other accesses may or may not generate an Alignment interrupt
  // depending on the implementation.
  // Alignment interrupt handling may require on the order of hundreds of cycles,
  // so every effort should be made to avoid misaligned memory accesses.
  //
  //
  // Generate stub for disjoint byte copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  address generate_disjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;


    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
    // Don't try anything fancy if arrays don't have many elements.
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 17);
    __ ble(CCR0, l_6); // copy 4 at a time

    if (!aligned) {
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp1, 3);
      __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.

      // Copy elements if necessary to align to 4 bytes.
      __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
      __ andi_(tmp1, tmp1, 3);
      __ beq(CCR0, l_2);

      __ subf(R5_ARG3, tmp1, R5_ARG3);
      __ bind(l_9);
      __ lbz(tmp2, 0, R3_ARG1);
      __ addic_(tmp1, tmp1, -1);
      __ stb(tmp2, 0, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 1);
      __ addi(R4_ARG2, R4_ARG2, 1);
      __ bne(CCR0, l_9);

      __ bind(l_2);
    }

    // copy 8 elements at a time
    __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
    __ andi_(tmp1, tmp2, 7);
    __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8

    // copy a 2-element word if necessary to align to 8 bytes
    __ andi_(R0, R3_ARG1, 7);
    __ beq(CCR0, l_7);

    __ lwzx(tmp2, R3_ARG1, tmp3);
    __ addi(R5_ARG3, R5_ARG3, -4);
    __ stwx(tmp2, R4_ARG2, tmp3);
    { // FasterArrayCopy
      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }
    __ bind(l_7);

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 31);
      __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain

      __ srdi(tmp1, R5_ARG3, 5);
      __ andi_(R5_ARG3, R5_ARG3, 31);
      __ mtctr(tmp1);

      __ bind(l_8);
      // Use unrolled version for mass copying (copy 32 elements at a time)
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ ld(tmp1, 0, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp4, 24, R3_ARG1);
      __ std(tmp1, 0, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp4, 24, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 32);
      __ addi(R4_ARG2, R4_ARG2, 32);
      __ bdnz(l_8);
    }

    __ bind(l_6);

    // copy 4 elements at a time
    __ cmpwi(CCR0, R5_ARG3, 4);
    __ blt(CCR0, l_1);
    __ srdi(tmp1, R5_ARG3, 2);
    __ mtctr(tmp1); // is > 0
    __ andi_(R5_ARG3, R5_ARG3, 3);

    { // FasterArrayCopy
      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);
      __ bind(l_3);
      __ lwzu(tmp2, 4, R3_ARG1);
      __ stwu(tmp2, 4, R4_ARG2);
      __ bdnz(l_3);
      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }

    // do single element copy
    __ bind(l_1);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_4);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -1);
      __ addi(R4_ARG2, R4_ARG2, -1);

      __ bind(l_5);
      __ lbzu(tmp2, 1, R3_ARG1);
      __ stbu(tmp2, 1, R4_ARG2);
      __ bdnz(l_5);
    }

    __ bind(l_4);
    __ blr();

    return start;
  }

  // Generate stub for conjoint byte copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  address generate_conjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;

#if defined(ABI_ELFv2)
     address nooverlap_target = aligned ?
       StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
       StubRoutines::jbyte_disjoint_arraycopy();
#else
    address nooverlap_target = aligned ?
      ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
      ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
#endif

    array_overlap_test(nooverlap_target, 0);
    // Do reverse copy. We assume the case of actual overlap is rare enough
    // that we don't have to optimize it.
    Label l_1, l_2;

    __ b(l_2);
    __ bind(l_1);
    __ stbx(tmp1, R4_ARG2, R5_ARG3);
    __ bind(l_2);
    __ addic_(R5_ARG3, R5_ARG3, -1);
    __ lbzx(tmp1, R3_ARG1, R5_ARG3);
    __ bge(CCR0, l_1);

    __ blr();

    return start;
  }

  // Generate stub for disjoint short copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //  elm.count: R5_ARG3 treated as signed
  //
  // Strategy for aligned==true:
  //
  //  If length <= 9:
  //     1. copy 2 elements at a time (l_6)
  //     2. copy last element if original element count was odd (l_1)
  //
  //  If length > 9:
  //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
  //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
  //     3. copy last element if one was left in step 2. (l_1)
  //
  //
  // Strategy for aligned==false:
  //
  //  If length <= 9: same as aligned==true case, but NOTE: load/stores
  //                  can be unaligned (see comment below)
  //
  //  If length > 9:
  //     1. continue with step 6. if the alignment of from and to mod 4
  //        is different.
  //     2. align from and to to 4 bytes by copying 1 element if necessary
  //     3. at l_2 from and to are 4 byte aligned; continue with
  //        5. if they cannot be aligned to 8 bytes because they have
  //        got different alignment mod 8.
  //     4. at this point we know that both, from and to, have the same
  //        alignment mod 8, now copy one element if necessary to get
  //        8 byte alignment of from and to.
  //     5. copy 4 elements at a time until less than 4 elements are
  //        left; depending on step 3. all load/stores are aligned or
  //        either all loads or all stores are unaligned.
  //     6. copy 2 elements at a time until less than 2 elements are
  //        left (l_6); arriving here from step 1., there is a chance
  //        that all accesses are unaligned.
  //     7. copy last element if one was left in step 6. (l_1)
  //
  //  There are unaligned data accesses using integer load/store
  //  instructions in this stub. POWER allows such accesses.
  //
  //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
  //  Chapter 2: Effect of Operand Placement on Performance) unaligned
  //  integer load/stores have good performance. Only unaligned
  //  floating point load/stores can have poor performance.
  //
  //  TODO:
  //
  //  1. check if aligning the backbranch target of loops is beneficial
  //
  address generate_disjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;

    address start = __ function_entry();

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
    // don't try anything fancy if arrays don't have many elements
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 9);
    __ ble(CCR0, l_6); // copy 2 at a time

    if (!aligned) {
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp1, 3);
      __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy

      // At this point it is guaranteed that both, from and to have the same alignment mod 4.

      // Copy 1 element if necessary to align to 4 bytes.
      __ andi_(tmp1, R3_ARG1, 3);
      __ beq(CCR0, l_2);

      __ lhz(tmp2, 0, R3_ARG1);
      __ addi(R3_ARG1, R3_ARG1, 2);
      __ sth(tmp2, 0, R4_ARG2);
      __ addi(R4_ARG2, R4_ARG2, 2);
      __ addi(R5_ARG3, R5_ARG3, -1);
      __ bind(l_2);

      // At this point the positions of both, from and to, are at least 4 byte aligned.

      // Copy 4 elements at a time.
      // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
      __ xorr(tmp2, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp2, 7);
      __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned

      // Copy a 2-element word if necessary to align to 8 bytes.
      __ andi_(R0, R3_ARG1, 7);
      __ beq(CCR0, l_7);

      __ lwzx(tmp2, R3_ARG1, tmp3);
      __ addi(R5_ARG3, R5_ARG3, -2);
      __ stwx(tmp2, R4_ARG2, tmp3);
      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }
    }

    __ bind(l_7);

    // Copy 4 elements at a time; either the loads or the stores can
    // be unaligned if aligned == false.

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 15);
      __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain

      __ srdi(tmp1, R5_ARG3, 4);
      __ andi_(R5_ARG3, R5_ARG3, 15);
      __ mtctr(tmp1);

      __ bind(l_8);
      // Use unrolled version for mass copying (copy 16 elements at a time).
1411       // Load feeding store gets zero latency on Power6, however not on Power5.
1412       // Therefore, the following sequence is made for the good of both.
1413       __ ld(tmp1, 0, R3_ARG1);
1414       __ ld(tmp2, 8, R3_ARG1);
1415       __ ld(tmp3, 16, R3_ARG1);
1416       __ ld(tmp4, 24, R3_ARG1);
1417       __ std(tmp1, 0, R4_ARG2);
1418       __ std(tmp2, 8, R4_ARG2);
1419       __ std(tmp3, 16, R4_ARG2);
1420       __ std(tmp4, 24, R4_ARG2);
1421       __ addi(R3_ARG1, R3_ARG1, 32);
1422       __ addi(R4_ARG2, R4_ARG2, 32);
1423       __ bdnz(l_8);
1424     }
1425     __ bind(l_6);
1426 
1427     // copy 2 elements at a time
1428     { // FasterArrayCopy
1429       __ cmpwi(CCR0, R5_ARG3, 2);
1430       __ blt(CCR0, l_1);
1431       __ srdi(tmp1, R5_ARG3, 1);
1432       __ andi_(R5_ARG3, R5_ARG3, 1);
1433 
1434       __ addi(R3_ARG1, R3_ARG1, -4);
1435       __ addi(R4_ARG2, R4_ARG2, -4);
1436       __ mtctr(tmp1);
1437 
1438       __ bind(l_3);
1439       __ lwzu(tmp2, 4, R3_ARG1);
1440       __ stwu(tmp2, 4, R4_ARG2);
1441       __ bdnz(l_3);
1442 
1443       __ addi(R3_ARG1, R3_ARG1, 4);
1444       __ addi(R4_ARG2, R4_ARG2, 4);
1445     }
1446 
1447     // do single element copy
1448     __ bind(l_1);
1449     __ cmpwi(CCR0, R5_ARG3, 0);
1450     __ beq(CCR0, l_4);
1451 
1452     { // FasterArrayCopy
1453       __ mtctr(R5_ARG3);
1454       __ addi(R3_ARG1, R3_ARG1, -2);
1455       __ addi(R4_ARG2, R4_ARG2, -2);
1456 
1457       __ bind(l_5);
1458       __ lhzu(tmp2, 2, R3_ARG1);
1459       __ sthu(tmp2, 2, R4_ARG2);
1460       __ bdnz(l_5);
1461     }
1462     __ bind(l_4);
1463     __ blr();
1464 
1465     return start;
1466   }
1467 
1468   // Generate stub for conjoint short copy.  If "aligned" is true, the
1469   // "from" and "to" addresses are assumed to be heapword aligned.
1470   //
1471   // Arguments for generated stub:
1472   //      from:  R3_ARG1
1473   //      to:    R4_ARG2
1474   //      count: R5_ARG3 treated as signed
1475   //
1476   address generate_conjoint_short_copy(bool aligned, const char * name) {
1477     StubCodeMark mark(this, "StubRoutines", name);
1478     address start = __ function_entry();
1479 
1480     Register tmp1 = R6_ARG4;
1481     Register tmp2 = R7_ARG5;
1482     Register tmp3 = R8_ARG6;
1483 
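         // On ELFv2 the stub address is the code entry point; on ELFv1 it is a
         // function descriptor that must be dereferenced to get the entry point.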
1484 #if defined(ABI_ELFv2)
1485     address nooverlap_target = aligned ?
1486         StubRoutines::arrayof_jshort_disjoint_arraycopy() :
1487         StubRoutines::jshort_disjoint_arraycopy();
1488 #else
1489     address nooverlap_target = aligned ?
1490         ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
1491         ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
1492 #endif
1493 
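         // The second argument of array_overlap_test is log2(element size); the test
         // branches to the disjoint stub whenever a forward copy is safe.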
1494     array_overlap_test(nooverlap_target, 1);
1495 
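         // Copy backwards, one halfword per iteration: tmp1 holds the byte offset and
         // counts down from 2*count; each pass loads from+offset and stores to+offset.
         // Backward order keeps an overlapping destination at a higher address intact.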
1496     Label l_1, l_2;
1497     __ sldi(tmp1, R5_ARG3, 1);
1498     __ b(l_2);
1499     __ bind(l_1);
1500     __ sthx(tmp2, R4_ARG2, tmp1);
1501     __ bind(l_2);
1502     __ addic_(tmp1, tmp1, -2);
1503     __ lhzx(tmp2, R3_ARG1, tmp1);
1504     __ bge(CCR0, l_1);
1505 
1506     __ blr();
1507 
1508     return start;
1509   }
1510 
1511   // Generate core code for disjoint int copy (and oop copy when compressed oops are used).
1512   // If "aligned" is true, the "from" and "to" addresses are assumed to be heapword aligned.
1513   //
1514   // Arguments:
1515   //      from:  R3_ARG1
1516   //      to:    R4_ARG2
1517   //      count: R5_ARG3 treated as signed
1518   //
1519   void generate_disjoint_int_copy_core(bool aligned) {
1520     Register tmp1 = R6_ARG4;
1521     Register tmp2 = R7_ARG5;
1522     Register tmp3 = R8_ARG6;
1523     Register tmp4 = R0;
1524 
1525     Label l_1, l_2, l_3, l_4, l_5, l_6;
1526     // for short arrays, just do single element copy
1527     __ li(tmp3, 0);
1528     __ cmpwi(CCR0, R5_ARG3, 5);
1529     __ ble(CCR0, l_2);
1530 
1531     if (!aligned) {
1532       // Check if arrays have the same alignment mod 8.
1533       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1534       __ andi_(R0, tmp1, 7);
1535       // Not the same alignment, but ld and std just need to be 4 byte aligned.
1536       __ bne(CCR0, l_4); // Different alignment mod 8 -> skip the 8 byte alignment step; ld/std cope with 4 byte alignment.
1537 
1538       // Copy 1 element to align to and from on an 8 byte boundary.
1539       __ andi_(R0, R3_ARG1, 7);
1540       __ beq(CCR0, l_4);
1541 
1542       __ lwzx(tmp2, R3_ARG1, tmp3);
1543       __ addi(R5_ARG3, R5_ARG3, -1);
1544       __ stwx(tmp2, R4_ARG2, tmp3);
1545       { // FasterArrayCopy
1546         __ addi(R3_ARG1, R3_ARG1, 4);
1547         __ addi(R4_ARG2, R4_ARG2, 4);
1548       }
1549       __ bind(l_4);
1550     }
1551 
1552     { // FasterArrayCopy
1553       __ cmpwi(CCR0, R5_ARG3, 7);
1554       __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1555 
1556       __ srdi(tmp1, R5_ARG3, 3);
1557       __ andi_(R5_ARG3, R5_ARG3, 7);
1558       __ mtctr(tmp1);
1559 
1560       __ bind(l_6);
1561       // Use unrolled version for mass copying (copy 8 elements at a time).
1562       // Load feeding store gets zero latency on Power6, however not on Power5.
1563       // Therefore, the following sequence is made for the good of both.
1564       __ ld(tmp1, 0, R3_ARG1);
1565       __ ld(tmp2, 8, R3_ARG1);
1566       __ ld(tmp3, 16, R3_ARG1);
1567       __ ld(tmp4, 24, R3_ARG1);
1568       __ std(tmp1, 0, R4_ARG2);
1569       __ std(tmp2, 8, R4_ARG2);
1570       __ std(tmp3, 16, R4_ARG2);
1571       __ std(tmp4, 24, R4_ARG2);
1572       __ addi(R3_ARG1, R3_ARG1, 32);
1573       __ addi(R4_ARG2, R4_ARG2, 32);
1574       __ bdnz(l_6);
1575     }
1576 
1577     // copy 1 element at a time
1578     __ bind(l_2);
1579     __ cmpwi(CCR0, R5_ARG3, 0);
1580     __ beq(CCR0, l_1);
1581 
1582     { // FasterArrayCopy
1583       __ mtctr(R5_ARG3);
1584       __ addi(R3_ARG1, R3_ARG1, -4);
1585       __ addi(R4_ARG2, R4_ARG2, -4);
1586 
1587       __ bind(l_3);
1588       __ lwzu(tmp2, 4, R3_ARG1);
1589       __ stwu(tmp2, 4, R4_ARG2);
1590       __ bdnz(l_3);
1591     }
1592 
1593     __ bind(l_1);
1594     return;
1595   }
1596 
1597   // Generate stub for disjoint int copy.  If "aligned" is true, the
1598   // "from" and "to" addresses are assumed to be heapword aligned.
1599   //
1600   // Arguments for generated stub:
1601   //      from:  R3_ARG1
1602   //      to:    R4_ARG2
1603   //      count: R5_ARG3 treated as signed
1604   //
1605   address generate_disjoint_int_copy(bool aligned, const char * name) {
1606     StubCodeMark mark(this, "StubRoutines", name);
1607     address start = __ function_entry();
1608     generate_disjoint_int_copy_core(aligned);
1609     __ blr();
1610     return start;
1611   }
1612 
1613   // Generate core code for conjoint int copy (and oop copy when compressed
1614   // oops are used).  If "aligned" is true, the "from" and "to" addresses
1615   // are assumed to be heapword aligned.
1616   //
1617   // Arguments:
1618   //      from:  R3_ARG1
1619   //      to:    R4_ARG2
1620   //      count: R5_ARG3 treated as signed
1621   //
1622   void generate_conjoint_int_copy_core(bool aligned) {
1623     // Do reverse copy.  We assume the case of actual overlap is rare enough
1624     // that we don't have to optimize it.
1625 
1626     Label l_1, l_2, l_3, l_4, l_5, l_6;
1627 
1628     Register tmp1 = R6_ARG4;
1629     Register tmp2 = R7_ARG5;
1630     Register tmp3 = R8_ARG6;
1631     Register tmp4 = R0;
1632 
1633     { // FasterArrayCopy
1634       __ cmpwi(CCR0, R5_ARG3, 0);
1635       __ beq(CCR0, l_6);
1636 
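           // Advance from and to just past the last element (count * 4 bytes), then
           // restore the element count; the copy below walks backwards from the end.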
1637       __ sldi(R5_ARG3, R5_ARG3, 2);
1638       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1639       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1640       __ srdi(R5_ARG3, R5_ARG3, 2);
1641 
1642       __ cmpwi(CCR0, R5_ARG3, 7);
1643       __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1644 
1645       __ srdi(tmp1, R5_ARG3, 3);
1646       __ andi(R5_ARG3, R5_ARG3, 7);
1647       __ mtctr(tmp1);
1648 
1649       __ bind(l_4);
1650       // Use unrolled version for mass copying (copy 8 elements at a time).
1651       // Load feeding store gets zero latency on Power6, however not on Power5.
1652       // Therefore, the following sequence is made for the good of both.
1653       __ addi(R3_ARG1, R3_ARG1, -32);
1654       __ addi(R4_ARG2, R4_ARG2, -32);
1655       __ ld(tmp4, 24, R3_ARG1);
1656       __ ld(tmp3, 16, R3_ARG1);
1657       __ ld(tmp2, 8, R3_ARG1);
1658       __ ld(tmp1, 0, R3_ARG1);
1659       __ std(tmp4, 24, R4_ARG2);
1660       __ std(tmp3, 16, R4_ARG2);
1661       __ std(tmp2, 8, R4_ARG2);
1662       __ std(tmp1, 0, R4_ARG2);
1663       __ bdnz(l_4);
1664 
1665       __ cmpwi(CCR0, R5_ARG3, 0);
1666       __ beq(CCR0, l_6);
1667 
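           // Tail: copy the remaining (count & 7) elements one word at a time, still backwards.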
1668       __ bind(l_5);
1669       __ mtctr(R5_ARG3);
1670       __ bind(l_3);
1671       __ lwz(R0, -4, R3_ARG1);
1672       __ stw(R0, -4, R4_ARG2);
1673       __ addi(R3_ARG1, R3_ARG1, -4);
1674       __ addi(R4_ARG2, R4_ARG2, -4);
1675       __ bdnz(l_3);
1676 
1677       __ bind(l_6);
1678     }
1679   }
1680 
1681   // Generate stub for conjoint int copy.  If "aligned" is true, the
1682   // "from" and "to" addresses are assumed to be heapword aligned.
1683   //
1684   // Arguments for generated stub:
1685   //      from:  R3_ARG1
1686   //      to:    R4_ARG2
1687   //      count: R5_ARG3 treated as signed
1688   //
1689   address generate_conjoint_int_copy(bool aligned, const char * name) {
1690     StubCodeMark mark(this, "StubRoutines", name);
1691     address start = __ function_entry();
1692 
1693 #if defined(ABI_ELFv2)
1694     address nooverlap_target = aligned ?
1695       StubRoutines::arrayof_jint_disjoint_arraycopy() :
1696       StubRoutines::jint_disjoint_arraycopy();
1697 #else
1698     address nooverlap_target = aligned ?
1699       ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
1700       ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
1701 #endif
1702 
1703     array_overlap_test(nooverlap_target, 2);
1704 
1705     generate_conjoint_int_copy_core(aligned);
1706 
1707     __ blr();
1708 
1709     return start;
1710   }
1711 
1712   // Generate core code for disjoint long copy (and oop copy with uncompressed
1713   // oops).  If "aligned" is true, the "from" and "to" addresses
1714   // are assumed to be heapword aligned.
1715   //
1716   // Arguments:
1717   //      from:  R3_ARG1
1718   //      to:    R4_ARG2
1719   //      count: R5_ARG3 treated as signed
1720   //
1721   void generate_disjoint_long_copy_core(bool aligned) {
1722     Register tmp1 = R6_ARG4;
1723     Register tmp2 = R7_ARG5;
1724     Register tmp3 = R8_ARG6;
1725     Register tmp4 = R0;
1726 
1727     Label l_1, l_2, l_3, l_4;
1728 
1729     { // FasterArrayCopy
1730       __ cmpwi(CCR0, R5_ARG3, 3);
1731       __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1732 
1733       __ srdi(tmp1, R5_ARG3, 2);
1734       __ andi_(R5_ARG3, R5_ARG3, 3);
1735       __ mtctr(tmp1);
1736 
1737       __ bind(l_4);
1738       // Use unrolled version for mass copying (copy 4 elements at a time).
1739       // Load feeding store gets zero latency on Power6, however not on Power5.
1740       // Therefore, the following sequence is made for the good of both.
1741       __ ld(tmp1, 0, R3_ARG1);
1742       __ ld(tmp2, 8, R3_ARG1);
1743       __ ld(tmp3, 16, R3_ARG1);
1744       __ ld(tmp4, 24, R3_ARG1);
1745       __ std(tmp1, 0, R4_ARG2);
1746       __ std(tmp2, 8, R4_ARG2);
1747       __ std(tmp3, 16, R4_ARG2);
1748       __ std(tmp4, 24, R4_ARG2);
1749       __ addi(R3_ARG1, R3_ARG1, 32);
1750       __ addi(R4_ARG2, R4_ARG2, 32);
1751       __ bdnz(l_4);
1752     }
1753 
1754     // copy 1 element at a time
1755     __ bind(l_3);
1756     __ cmpwi(CCR0, R5_ARG3, 0);
1757     __ beq(CCR0, l_1);
1758 
1759     { // FasterArrayCopy
1760       __ mtctr(R5_ARG3);
1761       __ addi(R3_ARG1, R3_ARG1, -8);
1762       __ addi(R4_ARG2, R4_ARG2, -8);
1763 
1764       __ bind(l_2);
1765       __ ldu(R0, 8, R3_ARG1);
1766       __ stdu(R0, 8, R4_ARG2);
1767       __ bdnz(l_2);
1768 
1769     }
1770     __ bind(l_1);
1771   }
1772 
1773   // Generate stub for disjoint long copy.  If "aligned" is true, the
1774   // "from" and "to" addresses are assumed to be heapword aligned.
1775   //
1776   // Arguments for generated stub:
1777   //      from:  R3_ARG1
1778   //      to:    R4_ARG2
1779   //      count: R5_ARG3 treated as signed
1780   //
1781   address generate_disjoint_long_copy(bool aligned, const char * name) {
1782     StubCodeMark mark(this, "StubRoutines", name);
1783     address start = __ function_entry();
1784     generate_disjoint_long_copy_core(aligned);
1785     __ blr();
1786 
1787     return start;
1788   }
1789 
1790   // Generate core code for conjoint long copy (and oop copy with uncompressed
1791   // oops).  If "aligned" is true, the "from" and "to" addresses
1792   // are assumed to be heapword aligned.
1793   //
1794   // Arguments:
1795   //      from:  R3_ARG1
1796   //      to:    R4_ARG2
1797   //      count: R5_ARG3 treated as signed
1798   //
1799   void generate_conjoint_long_copy_core(bool aligned) {
1800     Register tmp1 = R6_ARG4;
1801     Register tmp2 = R7_ARG5;
1802     Register tmp3 = R8_ARG6;
1803     Register tmp4 = R0;
1804 
1805     Label l_1, l_2, l_3, l_4, l_5;
1806 
1807     __ cmpwi(CCR0, R5_ARG3, 0);
1808     __ beq(CCR0, l_1);
1809 
1810     { // FasterArrayCopy
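           // Advance from and to just past the last element (count * 8 bytes), then
           // restore the element count; the copy below walks backwards from the end.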
1811       __ sldi(R5_ARG3, R5_ARG3, 3);
1812       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1813       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1814       __ srdi(R5_ARG3, R5_ARG3, 3);
1815 
1816       __ cmpwi(CCR0, R5_ARG3, 3);
1817       __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
1818 
1819       __ srdi(tmp1, R5_ARG3, 2);
1820       __ andi(R5_ARG3, R5_ARG3, 3);
1821       __ mtctr(tmp1);
1822 
1823       __ bind(l_4);
1824       // Use unrolled version for mass copying (copy 4 elements at a time).
1825       // Load feeding store gets zero latency on Power6, however not on Power5.
1826       // Therefore, the following sequence is made for the good of both.
1827       __ addi(R3_ARG1, R3_ARG1, -32);
1828       __ addi(R4_ARG2, R4_ARG2, -32);
1829       __ ld(tmp4, 24, R3_ARG1);
1830       __ ld(tmp3, 16, R3_ARG1);
1831       __ ld(tmp2, 8, R3_ARG1);
1832       __ ld(tmp1, 0, R3_ARG1);
1833       __ std(tmp4, 24, R4_ARG2);
1834       __ std(tmp3, 16, R4_ARG2);
1835       __ std(tmp2, 8, R4_ARG2);
1836       __ std(tmp1, 0, R4_ARG2);
1837       __ bdnz(l_4);
1838 
1839       __ cmpwi(CCR0, R5_ARG3, 0);
1840       __ beq(CCR0, l_1);
1841 
1842       __ bind(l_5);
1843       __ mtctr(R5_ARG3);
1844       __ bind(l_3);
1845       __ ld(R0, -8, R3_ARG1);
1846       __ std(R0, -8, R4_ARG2);
1847       __ addi(R3_ARG1, R3_ARG1, -8);
1848       __ addi(R4_ARG2, R4_ARG2, -8);
1849       __ bdnz(l_3);
1850 
1851     }
1852     __ bind(l_1);
1853   }
1854 
1855   // Generate stub for conjoint long copy.  If "aligned" is true, the
1856   // "from" and "to" addresses are assumed to be heapword aligned.
1857   //
1858   // Arguments for generated stub:
1859   //      from:  R3_ARG1
1860   //      to:    R4_ARG2
1861   //      count: R5_ARG3 treated as signed
1862   //
1863   address generate_conjoint_long_copy(bool aligned, const char * name) {
1864     StubCodeMark mark(this, "StubRoutines", name);
1865     address start = __ function_entry();
1866 
1867 #if defined(ABI_ELFv2)
1868     address nooverlap_target = aligned ?
1869       StubRoutines::arrayof_jlong_disjoint_arraycopy() :
1870       StubRoutines::jlong_disjoint_arraycopy();
1871 #else
1872     address nooverlap_target = aligned ?
1873       ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
1874       ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
1875 #endif
1876 
1877     array_overlap_test(nooverlap_target, 3);
1878     generate_conjoint_long_copy_core(aligned);
1879 
1880     __ blr();
1881 
1882     return start;
1883   }
1884 
1885   // Generate stub for conjoint oop copy.  If "aligned" is true, the
1886   // "from" and "to" addresses are assumed to be heapword aligned.
1887   //
1888   // Arguments for generated stub:
1889   //      from:  R3_ARG1
1890   //      to:    R4_ARG2
1891   //      count: R5_ARG3 treated as signed
1892   //      dest_uninitialized: G1 support
1893   //
1894   address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1895     StubCodeMark mark(this, "StubRoutines", name);
1896 
1897     address start = __ function_entry();
1898 
1899 #if defined(ABI_ELFv2)
1900     address nooverlap_target = aligned ?
1901       StubRoutines::arrayof_oop_disjoint_arraycopy() :
1902       StubRoutines::oop_disjoint_arraycopy();
1903 #else
1904     address nooverlap_target = aligned ?
1905       ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
1906       ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
1907 #endif
1908 
1909     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1910 
1911     // Save arguments.
1912     __ mr(R9_ARG7, R4_ARG2);
1913     __ mr(R10_ARG8, R5_ARG3);
1914 
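         // With compressed oops each element is a 32-bit narrowOop, so the int copy
         // core moves the data; otherwise oops are 64 bits and the long copy core is used.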
1915     if (UseCompressedOops) {
1916       array_overlap_test(nooverlap_target, 2);
1917       generate_conjoint_int_copy_core(aligned);
1918     } else {
1919       array_overlap_test(nooverlap_target, 3);
1920       generate_conjoint_long_copy_core(aligned);
1921     }
1922 
1923     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
1924     return start;
1925   }
1926 
1927   // Generate stub for disjoint oop copy.  If "aligned" is true, the
1928   // "from" and "to" addresses are assumed to be heapword aligned.
1929   //
1930   // Arguments for generated stub:
1931   //      from:  R3_ARG1
1932   //      to:    R4_ARG2
1933   //      count: R5_ARG3 treated as signed
1934   //      dest_uninitialized: G1 support
1935   //
1936   address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1937     StubCodeMark mark(this, "StubRoutines", name);
1938     address start = __ function_entry();
1939 
1940     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1941 
1942     // Save the destination and count for the post barrier; the copy core
1943     // called below destroys these arguments.
1944     __ mr(R9_ARG7, R4_ARG2);
1945     __ mr(R10_ARG8, R5_ARG3);
1946 
1947     if (UseCompressedOops) {
1948       generate_disjoint_int_copy_core(aligned);
1949     } else {
1950       generate_disjoint_long_copy_core(aligned);
1951     }
1952 
1953     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
1954 
1955     return start;
1956   }
1957 
1958   void generate_arraycopy_stubs() {
1959     // Note: the disjoint stubs must be generated first, some of
1960     // the conjoint stubs use them.
1961 
1962     // non-aligned disjoint versions
1963     StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
1964     StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
1965     StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
1966     StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
1967     StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
1968     StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
1969 
1970     // aligned disjoint versions
1971     StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
1972     StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
1973     StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
1974     StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
1975     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
1976     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
1977 
1978     // non-aligned conjoint versions
1979     StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
1980     StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
1981     StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
1982     StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
1983     StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
1984     StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
1985 
1986     // aligned conjoint versions
1987     StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
1988     StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
1989     StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
1990     StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
1991     StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
1992     StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
1993 
1994     // fill routines
1995     StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
1996     StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
1997     StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
1998     StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
1999     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2000     StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
2001   }
2002 
2003   // Safefetch stubs.
2004   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
2005     // safefetch signatures:
2006     //   int      SafeFetch32(int*      adr, int      errValue);
2007     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
2008     //
2009     // arguments:
2010     //   R3_ARG1 = adr
2011     //   R4_ARG2 = errValue
2012     //
2013     // result:
2014     //   R3_RET  = *adr or errValue
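         //
         // Illustrative use (not part of this file): callers probe possibly unmapped
         // memory without risking a crash, e.g. SafeFetch32((int*)addr, -1) returns -1
         // if reading addr faults.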
2015 
2016     StubCodeMark mark(this, "StubRoutines", name);
2017 
2018     // Entry point, pc or function descriptor.
2019     *entry = __ function_entry();
2020 
2021     // Load *adr into R4_ARG2, may fault.
2022     *fault_pc = __ pc();
2023     switch (size) {
2024       case 4:
2025         // int32_t, sign-extended
2026         __ lwa(R4_ARG2, 0, R3_ARG1);
2027         break;
2028       case 8:
2029         // int64_t
2030         __ ld(R4_ARG2, 0, R3_ARG1);
2031         break;
2032       default:
2033         ShouldNotReachHere();
2034     }
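         // If the load above faults, the signal handler resumes execution at the
         // continuation pc below; R4_ARG2 then still contains errValue, which is returned.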
2035 
2036     // return errValue or *adr
2037     *continuation_pc = __ pc();
2038     __ mr(R3_RET, R4_ARG2);
2039     __ blr();
2040   }
2041 
2042   // Initialization
2043   void generate_initial() {
2044     // Generates the initial stubs and initializes their entry points
2045 
2046     // Entry points that exist in all platforms.
2047     // Note: This is code that could be shared among different platforms - however the
2048     // benefit seems to be smaller than the disadvantage of having a
2049     // much more complicated generator structure. See also comment in
2050     // stubRoutines.hpp.
2051 
2052     StubRoutines::_forward_exception_entry          = generate_forward_exception();
2053     StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
2054     StubRoutines::_catch_exception_entry            = generate_catch_exception();
2055 
2056     // Build this early so it's available for the interpreter.
2057     StubRoutines::_throw_StackOverflowError_entry   =
2058       generate_throw_exception("StackOverflowError throw_exception",
2059                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
2060   }
2061 
2062   void generate_all() {
2063     // Generates all stubs and initializes the entry points
2064 
2065     // These entry points require SharedInfo::stack0 to be set up in
2066     // non-core builds
2067     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
2068     // Handle IncompatibleClassChangeError in itable stubs.
2069     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
2070     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
2071 
2072     StubRoutines::_handler_for_unsafe_access_entry         = generate_handler_for_unsafe_access();
2073 
2074     // support for verify_oop (must happen after universe_init)
2075     StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
2076 
2077     // arraycopy stubs used by compilers
2078     generate_arraycopy_stubs();
2079 
2080     if (UseAESIntrinsics) {
2081       guarantee(!UseAESIntrinsics, "not yet implemented.");
2082     }
2083 
2084     // Safefetch stubs.
2085     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
2086                                                        &StubRoutines::_safefetch32_fault_pc,
2087                                                        &StubRoutines::_safefetch32_continuation_pc);
2088     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
2089                                                        &StubRoutines::_safefetchN_fault_pc,
2090                                                        &StubRoutines::_safefetchN_continuation_pc);
2091   }
2092 
2093  public:
2094   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2095     // replace the standard masm with a special one:
2096     _masm = new MacroAssembler(code);
2097     if (all) {
2098       generate_all();
2099     } else {
2100       generate_initial();
2101     }
2102   }
2103 };
2104 
2105 void StubGenerator_generate(CodeBuffer* code, bool all) {
2106   StubGenerator g(code, all);
2107 }