1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2016 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "nativeInst_ppc.hpp"
  30 #include "oops/instanceOop.hpp"
  31 #include "oops/method.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "prims/methodHandles.hpp"
  35 #include "runtime/frame.inline.hpp"
  36 #include "runtime/handles.inline.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubCodeGenerator.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/top.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 
  43 #define __ _masm->
  44 
  45 #ifdef PRODUCT
  46 #define BLOCK_COMMENT(str) // nothing
  47 #else
  48 #define BLOCK_COMMENT(str) __ block_comment(str)
  49 #endif
  50 
  51 #if defined(ABI_ELFv2)
  52 #define STUB_ENTRY(name) StubRoutines::name()
  53 #else
  54 #define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
  55 #endif
  56 
  57 class StubGenerator: public StubCodeGenerator {
  58  private:
  59 
  60   // Call stubs are used to call Java from C
  61   //
  62   // Arguments:
  63   //
  64   //   R3  - call wrapper address     : address
  65   //   R4  - result                   : intptr_t*
  66   //   R5  - result type              : BasicType
  67   //   R6  - method                   : Method
  68   //   R7  - frame mgr entry point    : address
  69   //   R8  - parameter block          : intptr_t*
  70   //   R9  - parameter count in words : int
  71   //   R10 - thread                   : Thread*
  72   //
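  // For orientation, the C++ caller side (JavaCalls::call_helper) reaches this
  // stub through a function pointer roughly of the following shape; this is
  // only a sketch, the exact typedef lives in stubRoutines.hpp:
  //
  //   typedef void (*CallStub)(address   link,                  // R3: call wrapper
  //                            intptr_t* result,                // R4
  //                            BasicType result_type,           // R5
  //                            Method*   method,                // R6
  //                            address   entry_point,           // R7: frame mgr entry
  //                            intptr_t* parameters,            // R8
  //                            int       size_of_parameters,    // R9
  //                            Thread*   thread);               // R10
  //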
  73   address generate_call_stub(address& return_address) {
    // Set up a new C frame, copy the Java arguments, call the frame manager
    // or native_entry, and process the result.
  76 
  77     StubCodeMark mark(this, "StubRoutines", "call_stub");
  78 
  79     address start = __ function_entry();
  80 
  81     // some sanity checks
  82     assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
  83     assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
  84     assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
  85     assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
  86     assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");
  87 
  88     Register r_arg_call_wrapper_addr        = R3;
  89     Register r_arg_result_addr              = R4;
  90     Register r_arg_result_type              = R5;
  91     Register r_arg_method                   = R6;
  92     Register r_arg_entry                    = R7;
  93     Register r_arg_thread                   = R10;
  94 
  95     Register r_temp                         = R24;
  96     Register r_top_of_arguments_addr        = R25;
  97     Register r_entryframe_fp                = R26;
  98 
  99     {
 100       // Stack on entry to call_stub:
 101       //
 102       //      F1      [C_FRAME]
 103       //              ...
 104 
 105       Register r_arg_argument_addr          = R8;
 106       Register r_arg_argument_count         = R9;
 107       Register r_frame_alignment_in_bytes   = R27;
 108       Register r_argument_addr              = R28;
 109       Register r_argumentcopy_addr          = R29;
 110       Register r_argument_size_in_bytes     = R30;
 111       Register r_frame_size                 = R23;
 112 
 113       Label arguments_copied;
 114 
 115       // Save LR/CR to caller's C_FRAME.
 116       __ save_LR_CR(R0);
 117 
 118       // Zero extend arg_argument_count.
 119       __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);
 120 
      // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
 122       __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
 123 
 124       // Keep copy of our frame pointer (caller's SP).
 125       __ mr(r_entryframe_fp, R1_SP);
 126 
 127       BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
 128       // Push ENTRY_FRAME including arguments:
 129       //
 130       //      F0      [TOP_IJAVA_FRAME_ABI]
 131       //              alignment (optional)
 132       //              [outgoing Java arguments]
 133       //              [ENTRY_FRAME_LOCALS]
 134       //      F1      [C_FRAME]
 135       //              ...
 136 
 137       // calculate frame size
 138 
 139       // unaligned size of arguments
 140       __ sldi(r_argument_size_in_bytes,
 141                   r_arg_argument_count, Interpreter::logStackElementSize);
 142       // arguments alignment (max 1 slot)
 143       // FIXME: use round_to() here
 144       __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
 145       __ sldi(r_frame_alignment_in_bytes,
 146               r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
 147 
 148       // size = unaligned size of arguments + top abi's size
 149       __ addi(r_frame_size, r_argument_size_in_bytes,
 150               frame::top_ijava_frame_abi_size);
 151       // size += arguments alignment
 152       __ add(r_frame_size,
 153              r_frame_size, r_frame_alignment_in_bytes);
 154       // size += size of call_stub locals
 155       __ addi(r_frame_size,
 156               r_frame_size, frame::entry_frame_locals_size);
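
      // As a plain formula (illustrative only, matching the computation above;
      // one stack element is 8 bytes on ppc64):
      //
      //   arg_bytes  = arg_count * Interpreter::stackElementSize;
      //   pad_bytes  = (arg_count & 1) * Interpreter::stackElementSize;  // keep 16-byte alignment
      //   frame_size = arg_bytes + pad_bytes
      //              + frame::top_ijava_frame_abi_size
      //              + frame::entry_frame_locals_size;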
 157 
 158       // push ENTRY_FRAME
 159       __ push_frame(r_frame_size, r_temp);
 160 
 161       // initialize call_stub locals (step 1)
 162       __ std(r_arg_call_wrapper_addr,
 163              _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
 164       __ std(r_arg_result_addr,
 165              _entry_frame_locals_neg(result_address), r_entryframe_fp);
 166       __ std(r_arg_result_type,
 167              _entry_frame_locals_neg(result_type), r_entryframe_fp);
 168       // we will save arguments_tos_address later
 169 
 170 
 171       BLOCK_COMMENT("Copy Java arguments");
 172       // copy Java arguments
 173 
 174       // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
 175       // FIXME: why not simply use SP+frame::top_ijava_frame_size?
 176       __ addi(r_top_of_arguments_addr,
 177               R1_SP, frame::top_ijava_frame_abi_size);
 178       __ add(r_top_of_arguments_addr,
 179              r_top_of_arguments_addr, r_frame_alignment_in_bytes);
 180 
 181       // any arguments to copy?
 182       __ cmpdi(CCR0, r_arg_argument_count, 0);
 183       __ beq(CCR0, arguments_copied);
 184 
 185       // prepare loop and copy arguments in reverse order
 186       {
 187         // init CTR with arg_argument_count
 188         __ mtctr(r_arg_argument_count);
 189 
        // let r_argumentcopy_addr point to last outgoing Java arguments
 191         __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);
 192 
 193         // let r_argument_addr point to last incoming java argument
 194         __ add(r_argument_addr,
 195                    r_arg_argument_addr, r_argument_size_in_bytes);
 196         __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
 197 
 198         // now loop while CTR > 0 and copy arguments
 199         {
 200           Label next_argument;
 201           __ bind(next_argument);
 202 
 203           __ ld(r_temp, 0, r_argument_addr);
 204           // argument_addr--;
 205           __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
 206           __ std(r_temp, 0, r_argumentcopy_addr);
 207           // argumentcopy_addr++;
 208           __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);
 209 
 210           __ bdnz(next_argument);
 211         }
 212       }
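
      // Illustrative C-style equivalent of the copy loop above (not generated
      // code; the names mirror the registers used above): the incoming
      // argument block is walked from its last word down to its first, while
      // the outgoing Java argument area is filled upwards.
      //
      //   intptr_t* src = arg_block + arg_count - 1;   // last incoming argument
      //   intptr_t* dst = top_of_arguments_addr;       // first outgoing slot
      //   for (int i = 0; i < arg_count; i++) {
      //     *dst++ = *src--;
      //   }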
 213 
 214       // Arguments copied, continue.
 215       __ bind(arguments_copied);
 216     }
 217 
 218     {
 219       BLOCK_COMMENT("Call frame manager or native entry.");
 220       // Call frame manager or native entry.
 221       Register r_new_arg_entry = R14;
 222       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
 223                                  r_arg_method, r_arg_thread);
 224 
 225       __ mr(r_new_arg_entry, r_arg_entry);
 226 
 227       // Register state on entry to frame manager / native entry:
 228       //
 229       //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
 230       //   R19_method  -  Method
 231       //   R16_thread  -  JavaThread*
 232 
 233       // Tos must point to last argument - element_size.
 234 #ifdef CC_INTERP
 235       const Register tos = R17_tos;
 236 #else
 237       const Register tos = R15_esp;
 238 #endif
 239       __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
 240 
 241       // initialize call_stub locals (step 2)
 242       // now save tos as arguments_tos_address
 243       __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);
 244 
 245       // load argument registers for call
 246       __ mr(R19_method, r_arg_method);
 247       __ mr(R16_thread, r_arg_thread);
 248       assert(tos != r_arg_method, "trashed r_arg_method");
 249       assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
 250 
 251       // Set R15_prev_state to 0 for simplifying checks in callee.
 252 #ifdef CC_INTERP
 253       __ li(R15_prev_state, 0);
 254 #else
 255       __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
 256 #endif
 257       // Stack on entry to frame manager / native entry:
 258       //
 259       //      F0      [TOP_IJAVA_FRAME_ABI]
 260       //              alignment (optional)
 261       //              [outgoing Java arguments]
 262       //              [ENTRY_FRAME_LOCALS]
 263       //      F1      [C_FRAME]
 264       //              ...
 265       //
 266 
 267       // global toc register
 268       __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
      // Remember the senderSP so the interpreter can pop c2i arguments off the stack
      // when called via a c2i.
 271 
      // Pass initial_caller_sp to the frame manager.
 273       __ mr(R21_tmp1, R1_SP);
 274 
      // Do a light-weight C-call here: r_new_arg_entry holds the address of the
      // interpreter entry point (frame manager or native entry), and the runtime
      // value of LR is saved in return_address.
 278       assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
 279              "trashed r_new_arg_entry");
 280       return_address = __ call_stub(r_new_arg_entry);
 281     }
 282 
 283     {
 284       BLOCK_COMMENT("Returned from frame manager or native entry.");
 285       // Returned from frame manager or native entry.
 286       // Now pop frame, process result, and return to caller.
 287 
 288       // Stack on exit from frame manager / native entry:
 289       //
 290       //      F0      [ABI]
 291       //              ...
 292       //              [ENTRY_FRAME_LOCALS]
 293       //      F1      [C_FRAME]
 294       //              ...
 295       //
 296       // Just pop the topmost frame ...
 297       //
 298 
 299       Label ret_is_object;
 300       Label ret_is_long;
 301       Label ret_is_float;
 302       Label ret_is_double;
 303 
 304       Register r_entryframe_fp = R30;
 305       Register r_lr            = R7_ARG5;
 306       Register r_cr            = R8_ARG6;
 307 
 308       // Reload some volatile registers which we've spilled before the call
 309       // to frame manager / native entry.
 310       // Access all locals via frame pointer, because we know nothing about
 311       // the topmost frame's size.
 312       __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
 313       assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
 314       __ ld(r_arg_result_addr,
 315             _entry_frame_locals_neg(result_address), r_entryframe_fp);
 316       __ ld(r_arg_result_type,
 317             _entry_frame_locals_neg(result_type), r_entryframe_fp);
 318       __ ld(r_cr, _abi(cr), r_entryframe_fp);
 319       __ ld(r_lr, _abi(lr), r_entryframe_fp);
 320 
 321       // pop frame and restore non-volatiles, LR and CR
 322       __ mr(R1_SP, r_entryframe_fp);
 323       __ mtcr(r_cr);
 324       __ mtlr(r_lr);
 325 
 326       // Store result depending on type. Everything that is not
 327       // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
 328       __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
 329       __ cmpwi(CCR1, r_arg_result_type, T_LONG);
 330       __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
 331       __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
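
      // The compares above, together with the branches further down, implement
      // in effect the following switch (sketch, not generated code). The four
      // compares are issued into separate condition register fields before the
      // non-volatiles are restored, so only the conditional branches remain
      // afterwards:
      //
      //   switch (result_type) {
      //     case T_OBJECT:
      //     case T_LONG:   *(intptr_t*)result_addr = R3_RET;        break;
      //     case T_FLOAT:  *(float*)   result_addr = F1_RET;        break;
      //     case T_DOUBLE: *(double*)  result_addr = F1_RET;        break;
      //     default:       *(jint*)    result_addr = (jint)R3_RET;  break; // treated as T_INT
      //   }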
 332 
 333       // restore non-volatile registers
 334       __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
 335 
 336 
 337       // Stack on exit from call_stub:
 338       //
 339       //      0       [C_FRAME]
 340       //              ...
 341       //
 342       //  no call_stub frames left.
 343 
 344       // All non-volatiles have been restored at this point!!
 345       assert(R3_RET == R3, "R3_RET should be R3");
 346 
 347       __ beq(CCR0, ret_is_object);
 348       __ beq(CCR1, ret_is_long);
 349       __ beq(CCR5, ret_is_float);
 350       __ beq(CCR6, ret_is_double);
 351 
 352       // default:
 353       __ stw(R3_RET, 0, r_arg_result_addr);
 354       __ blr(); // return to caller
 355 
 356       // case T_OBJECT:
 357       __ bind(ret_is_object);
 358       __ std(R3_RET, 0, r_arg_result_addr);
 359       __ blr(); // return to caller
 360 
 361       // case T_LONG:
 362       __ bind(ret_is_long);
 363       __ std(R3_RET, 0, r_arg_result_addr);
 364       __ blr(); // return to caller
 365 
 366       // case T_FLOAT:
 367       __ bind(ret_is_float);
 368       __ stfs(F1_RET, 0, r_arg_result_addr);
 369       __ blr(); // return to caller
 370 
 371       // case T_DOUBLE:
 372       __ bind(ret_is_double);
 373       __ stfd(F1_RET, 0, r_arg_result_addr);
 374       __ blr(); // return to caller
 375     }
 376 
 377     return start;
 378   }
 379 
 380   // Return point for a Java call if there's an exception thrown in
 381   // Java code.  The exception is caught and transformed into a
 382   // pending exception stored in JavaThread that can be tested from
 383   // within the VM.
 384   //
 385   address generate_catch_exception() {
 386     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 387 
 388     address start = __ pc();
 389 
 390     // Registers alive
 391     //
 392     //  R16_thread
 393     //  R3_ARG1 - address of pending exception
 394     //  R4_ARG2 - return address in call stub
 395 
 396     const Register exception_file = R21_tmp1;
 397     const Register exception_line = R22_tmp2;
 398 
 399     __ load_const(exception_file, (void*)__FILE__);
 400     __ load_const(exception_line, (void*)__LINE__);
 401 
 402     __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
 403     // store into `char *'
 404     __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
 405     // store into `int'
 406     __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);
 407 
 408     // complete return to VM
 409     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
 410 
 411     __ mtlr(R4_ARG2);
 412     // continue in call stub
 413     __ blr();
 414 
 415     return start;
 416   }
 417 
 418   // Continuation point for runtime calls returning with a pending
 419   // exception.  The pending exception check happened in the runtime
 420   // or native call stub.  The pending exception in Thread is
 421   // converted into a Java-level exception.
 422   //
 423   // Read:
 424   //
 425   //   LR:     The pc the runtime library callee wants to return to.
 426   //           Since the exception occurred in the callee, the return pc
 427   //           from the point of view of Java is the exception pc.
 428   //   thread: Needed for method handles.
 429   //
 430   // Invalidate:
 431   //
 432   //   volatile registers (except below).
 433   //
 434   // Update:
 435   //
 436   //   R4_ARG2: exception
 437   //
 438   // (LR is unchanged and is live out).
 439   //
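  // Logically, the stub below performs the following steps (sketch only):
  //
  //   address pc      = LR;                          // return pc == exception pc
  //   address handler = SharedRuntime::exception_handler_for_return_address(thread, pc);
  //   oop     ex      = thread->pending_exception();
  //   thread->clear_pending_exception();
  //   goto handler;                                  // with R3_ARG1 = ex, R4_ARG2 = pc
  //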
 440   address generate_forward_exception() {
 441     StubCodeMark mark(this, "StubRoutines", "forward_exception");
 442     address start = __ pc();
 443 
 444 #if !defined(PRODUCT)
 445     if (VerifyOops) {
 446       // Get pending exception oop.
 447       __ ld(R3_ARG1,
 448                 in_bytes(Thread::pending_exception_offset()),
 449                 R16_thread);
 450       // Make sure that this code is only executed if there is a pending exception.
 451       {
 452         Label L;
 453         __ cmpdi(CCR0, R3_ARG1, 0);
 454         __ bne(CCR0, L);
 455         __ stop("StubRoutines::forward exception: no pending exception (1)");
 456         __ bind(L);
 457       }
 458       __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
 459     }
 460 #endif
 461 
 462     // Save LR/CR and copy exception pc (LR) into R4_ARG2.
 463     __ save_LR_CR(R4_ARG2);
 464     __ push_frame_reg_args(0, R0);
 465     // Find exception handler.
 466     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
 467                      SharedRuntime::exception_handler_for_return_address),
 468                     R16_thread,
 469                     R4_ARG2);
 470     // Copy handler's address.
 471     __ mtctr(R3_RET);
 472     __ pop_frame();
 473     __ restore_LR_CR(R0);
 474 
 475     // Set up the arguments for the exception handler:
 476     //  - R3_ARG1: exception oop
 477     //  - R4_ARG2: exception pc.
 478 
 479     // Load pending exception oop.
 480     __ ld(R3_ARG1,
 481               in_bytes(Thread::pending_exception_offset()),
 482               R16_thread);
 483 
 484     // The exception pc is the return address in the caller.
 485     // Must load it into R4_ARG2.
 486     __ mflr(R4_ARG2);
 487 
 488 #ifdef ASSERT
 489     // Make sure exception is set.
 490     {
 491       Label L;
 492       __ cmpdi(CCR0, R3_ARG1, 0);
 493       __ bne(CCR0, L);
 494       __ stop("StubRoutines::forward exception: no pending exception (2)");
 495       __ bind(L);
 496     }
 497 #endif
 498 
 499     // Clear the pending exception.
 500     __ li(R0, 0);
 501     __ std(R0,
 502                in_bytes(Thread::pending_exception_offset()),
 503                R16_thread);
 504     // Jump to exception handler.
 505     __ bctr();
 506 
 507     return start;
 508   }
 509 
 510 #undef __
 511 #define __ masm->
 512   // Continuation point for throwing of implicit exceptions that are
 513   // not handled in the current activation. Fabricates an exception
 514   // oop and initiates normal exception dispatching in this
 515   // frame. Only callee-saved registers are preserved (through the
 516   // normal register window / RegisterMap handling).  If the compiler
 517   // needs all registers to be preserved between the fault point and
 518   // the exception handler then it must assume responsibility for that
 519   // in AbstractCompiler::continuation_for_implicit_null_exception or
 520   // continuation_for_implicit_division_by_zero_exception. All other
 521   // implicit exceptions (e.g., NullPointerException or
 522   // AbstractMethodError on entry) are either at call sites or
 523   // otherwise assume that stack unwinding will be initiated, so
 524   // caller saved registers were assumed volatile in the compiler.
 525   //
 526   // Note that we generate only this stub into a RuntimeStub, because
 527   // it needs to be properly traversed and ignored during GC, so we
 528   // change the meaning of the "__" macro within this method.
 529   //
 530   // Note: the routine set_pc_not_at_call_for_caller in
 531   // SharedRuntime.cpp requires that this code be generated into a
 532   // RuntimeStub.
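  //
  // In outline, the generated stub does the following (illustrative pseudocode
  // only; the real code additionally records an OopMap at the call site):
  //
  //   save LR/CR; push an abi_reg_args frame;            // frame_complete_pc
  //   record the current pc and set_last_Java_frame(SP, pc);
  //   runtime_entry(thread, arg1, arg2);                 // installs the pending exception
  //   reset_last_Java_frame(); pop the frame; restore LR/CR;
  //   jump to StubRoutines::forward_exception_entry();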
 533   address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
 534                                    Register arg1 = noreg, Register arg2 = noreg) {
 535     CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
 536     MacroAssembler* masm = new MacroAssembler(&code);
 537 
 538     OopMapSet* oop_maps  = new OopMapSet();
 539     int frame_size_in_bytes = frame::abi_reg_args_size;
 540     OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
 541 
 542     address start = __ pc();
 543 
 544     __ save_LR_CR(R11_scratch1);
 545 
 546     // Push a frame.
 547     __ push_frame_reg_args(0, R11_scratch1);
 548 
 549     address frame_complete_pc = __ pc();
 550 
 551     if (restore_saved_exception_pc) {
 552       __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
 553     }
 554 
 555     // Note that we always have a runtime stub frame on the top of
 556     // stack by this point. Remember the offset of the instruction
 557     // whose address will be moved to R11_scratch1.
 558     address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
 559 
 560     __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
 561 
 562     __ mr(R3_ARG1, R16_thread);
 563     if (arg1 != noreg) {
 564       __ mr(R4_ARG2, arg1);
 565     }
 566     if (arg2 != noreg) {
 567       __ mr(R5_ARG3, arg2);
 568     }
 569 #if defined(ABI_ELFv2)
 570     __ call_c(runtime_entry, relocInfo::none);
 571 #else
 572     __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
 573 #endif
 574 
 575     // Set an oopmap for the call site.
 576     oop_maps->add_gc_map((int)(gc_map_pc - start), map);
 577 
 578     __ reset_last_Java_frame();
 579 
 580 #ifdef ASSERT
 581     // Make sure that this code is only executed if there is a pending
 582     // exception.
 583     {
 584       Label L;
 585       __ ld(R0,
 586                 in_bytes(Thread::pending_exception_offset()),
 587                 R16_thread);
 588       __ cmpdi(CCR0, R0, 0);
 589       __ bne(CCR0, L);
 590       __ stop("StubRoutines::throw_exception: no pending exception");
 591       __ bind(L);
 592     }
 593 #endif
 594 
 595     // Pop frame.
 596     __ pop_frame();
 597 
 598     __ restore_LR_CR(R11_scratch1);
 599 
 600     __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
 601     __ mtctr(R11_scratch1);
 602     __ bctr();
 603 
 604     // Create runtime stub with OopMap.
 605     RuntimeStub* stub =
 606       RuntimeStub::new_runtime_stub(name, &code,
 607                                     /*frame_complete=*/ (int)(frame_complete_pc - start),
 608                                     frame_size_in_bytes/wordSize,
 609                                     oop_maps,
 610                                     false);
 611     return stub->entry_point();
 612   }
 613 #undef __
 614 #define __ _masm->
 615 
 616   //  Generate G1 pre-write barrier for array.
 617   //
 618   //  Input:
 619   //     from     - register containing src address (only needed for spilling)
 620   //     to       - register containing starting address
 621   //     count    - register containing element count
 622   //     tmp      - scratch register
 623   //
 624   //  Kills:
 625   //     nothing
 626   //
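  //  In pseudocode, the G1 case below amounts to the following (sketch, not
  //  the exact emitted code):
  //
  //    if (!dest_uninitialized && thread->satb_mark_queue_is_active()) {
  //      // spill from/to/count (plus optional preserves), then
  //      BarrierSet::static_write_ref_array_pre(to, count);
  //      // reload the spilled registers
  //    }
  //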
 627   void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1,
 628                                        Register preserve1 = noreg, Register preserve2 = noreg) {
 629     BarrierSet* const bs = Universe::heap()->barrier_set();
 630     switch (bs->kind()) {
 631       case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized.
 633         if (!dest_uninitialized) {
 634           int spill_slots = 3;
 635           if (preserve1 != noreg) { spill_slots++; }
 636           if (preserve2 != noreg) { spill_slots++; }
 637           const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
 638           Label filtered;
 639 
 640           // Is marking active?
 641           if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
 642             __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
 643           } else {
 644             guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
 645             __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
 646           }
 647           __ cmpdi(CCR0, Rtmp1, 0);
 648           __ beq(CCR0, filtered);
 649 
 650           __ save_LR_CR(R0);
 651           __ push_frame(frame_size, R0);
 652           int slot_nr = 0;
 653           __ std(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
 654           __ std(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
 655           __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
 656           if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
 657           if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
 658 
 659           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
 660 
 661           slot_nr = 0;
 662           __ ld(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
 663           __ ld(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
 664           __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
 665           if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
 666           if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
 667           __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
 668           __ restore_LR_CR(R0);
 669 
 670           __ bind(filtered);
 671         }
 672         break;
 673       case BarrierSet::CardTableForRS:
 674       case BarrierSet::CardTableExtension:
 675       case BarrierSet::ModRef:
 676         break;
 677       default:
 678         ShouldNotReachHere();
 679     }
 680   }
 681 
 682   //  Generate CMS/G1 post-write barrier for array.
 683   //
 684   //  Input:
 685   //     addr     - register containing starting address
 686   //     count    - register containing element count
 687   //     tmp      - scratch register
 688   //
 689   //  The input registers and R0 are overwritten.
 690   //
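  //  For the card table cases, the code below is equivalent to the following
  //  sketch (the G1 case instead calls static_write_ref_array_post(addr, count)):
  //
  //    jbyte* start = byte_map_base + (addr >> card_shift);
  //    jbyte* end   = byte_map_base + ((addr + count * heapOopSize - 1) >> card_shift);
  //    for (jbyte* p = start; p <= end; p++) {
  //      *p = 0;   // dirty the card
  //    }
  //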
 691   void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) {
 692     BarrierSet* const bs = Universe::heap()->barrier_set();
 693 
 694     switch (bs->kind()) {
 695       case BarrierSet::G1SATBCTLogging:
 696         {
 697           int spill_slots = (preserve != noreg) ? 1 : 0;
 698           const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
 699 
 700           __ save_LR_CR(R0);
 701           __ push_frame(frame_size, R0);
 702           if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
 703           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
 704           if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
 705           __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
 706           __ restore_LR_CR(R0);
 707         }
 708         break;
 709       case BarrierSet::CardTableForRS:
 710       case BarrierSet::CardTableExtension:
 711         {
 712           Label Lskip_loop, Lstore_loop;
 713           if (UseConcMarkSweepGC) {
 714             // TODO PPC port: contribute optimization / requires shared changes
 715             __ release();
 716           }
 717 
 718           CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs);
 719           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 720           assert_different_registers(addr, count, tmp);
 721 
 722           __ sldi(count, count, LogBytesPerHeapOop);
 723           __ addi(count, count, -BytesPerHeapOop);
 724           __ add(count, addr, count);
 725           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
 726           __ srdi(addr, addr, CardTableModRefBS::card_shift);
 727           __ srdi(count, count, CardTableModRefBS::card_shift);
 728           __ subf(count, addr, count);
 729           assert_different_registers(R0, addr, count, tmp);
 730           __ load_const(tmp, (address)ct->byte_map_base);
 731           __ addic_(count, count, 1);
 732           __ beq(CCR0, Lskip_loop);
 733           __ li(R0, 0);
 734           __ mtctr(count);
 735           // Byte store loop
 736           __ bind(Lstore_loop);
 737           __ stbx(R0, tmp, addr);
 738           __ addi(addr, addr, 1);
 739           __ bdnz(Lstore_loop);
 740           __ bind(Lskip_loop);
 741         }
 742       break;
 743       case BarrierSet::ModRef:
 744         break;
 745       default:
 746         ShouldNotReachHere();
 747     }
 748   }
 749 
 750   // Support for void zero_words_aligned8(HeapWord* to, size_t count)
 751   //
  // Arguments:
  //   to:    R3_ARG1, start address (must be 8-byte aligned)
  //   count: R4_ARG2, number of 8-byte words to clear
  //
  // Destroys:
  //   R5_ARG3-R7_ARG5 (used as temporaries) and CTR
  //
 758   address generate_zero_words_aligned8() {
 759     StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
 760 
 761     // Implemented as in ClearArray.
 762     address start = __ function_entry();
 763 
 764     Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
 765     Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
 766     Register tmp1_reg       = R5_ARG3;
 767     Register tmp2_reg       = R6_ARG4;
 768     Register zero_reg       = R7_ARG5;
 769 
 770     // Procedure for large arrays (uses data cache block zero instruction).
 771     Label dwloop, fast, fastloop, restloop, lastdword, done;
 772     int cl_size = VM_Version::L1_data_cache_line_size();
 773     int cl_dwords = cl_size >> 3;
 774     int cl_dwordaddr_bits = exact_log2(cl_dwords);
 775     int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
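
    // Overall strategy (sketch only; the code below additionally handles an
    // odd trailing dword and skips dcbz for small requests): clear dword-by-
    // dword until base_ptr is cache-line (cl_size byte) aligned, clear whole
    // cache lines with dcbz, then clear the remaining dwords.
    //
    //   while (!cacheline_aligned(p) && count > 0) { *p++ = 0; count--; }
    //   while (count >= cl_dwords) { dcbz(p); p += cl_dwords; count -= cl_dwords; }
    //   while (count-- > 0) { *p++ = 0; }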
 776 
 777     // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
 778     __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
 779     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
 780     __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
 781     __ load_const_optimized(zero_reg, 0L);      // Use as zero register.
 782 
 783     __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
 784     __ beq(CCR0, lastdword);                    // size <= 1
 785     __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
 786     __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
 787     __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000
 788 
 789     __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
 790     __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.
 791 
 792     __ beq(CCR0, fast);                         // already 128byte aligned
 793     __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
 794     __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)
 795 
 796     // Clear in first cache line dword-by-dword if not already 128byte aligned.
 797     __ bind(dwloop);
 798       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
 799       __ addi(base_ptr_reg, base_ptr_reg, 8);
 800     __ bdnz(dwloop);
 801 
 802     // clear 128byte blocks
 803     __ bind(fast);
 804     __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
 805     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even
 806 
 807     __ mtctr(tmp1_reg);                         // load counter
 808     __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
 809     __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords
 810 
 811     __ bind(fastloop);
 812       __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
 813       __ addi(base_ptr_reg, base_ptr_reg, cl_size);
 814     __ bdnz(fastloop);
 815 
 816     //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
 817     __ beq(CCR0, lastdword);                    // rest<=1
 818     __ mtctr(tmp1_reg);                         // load counter
 819 
 820     // Clear rest.
 821     __ bind(restloop);
 822       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
 823       __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
 824       __ addi(base_ptr_reg, base_ptr_reg, 16);
 825     __ bdnz(restloop);
 826 
 827     __ bind(lastdword);
 828     __ beq(CCR1, done);
 829     __ std(zero_reg, 0, base_ptr_reg);
 830     __ bind(done);
 831     __ blr();                                   // return
 832 
 833     return start;
 834   }
 835 
 836   // The following routine generates a subroutine to throw an asynchronous
 837   // UnknownError when an unsafe access gets a fault that could not be
 838   // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
 839   //
 840   address generate_handler_for_unsafe_access() {
 841     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
 842     address start = __ function_entry();
 843     __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
 844     return start;
 845   }
 846 
 847 #if !defined(PRODUCT)
 848   // Wrapper which calls oopDesc::is_oop_or_null()
 849   // Only called by MacroAssembler::verify_oop
 850   static void verify_oop_helper(const char* message, oop o) {
 851     if (!o->is_oop_or_null()) {
 852       fatal("%s", message);
 853     }
 854     ++ StubRoutines::_verify_oop_count;
 855   }
 856 #endif
 857 
 858   // Return address of code to be called from code generated by
 859   // MacroAssembler::verify_oop.
 860   //
 861   // Don't generate, rather use C++ code.
 862   address generate_verify_oop() {
 863     // this is actually a `FunctionDescriptor*'.
 864     address start = 0;
 865 
 866 #if !defined(PRODUCT)
 867     start = CAST_FROM_FN_PTR(address, verify_oop_helper);
 868 #endif
 869 
 870     return start;
 871   }
 872 
 873   // Fairer handling of safepoints for native methods.
 874   //
 875   // Generate code which reads from the polling page. This special handling is needed as the
 876   // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
 877   // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
 878   // to read from the safepoint polling page.
 879   address generate_load_from_poll() {
 880     StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
 881     address start = __ function_entry();
 882     __ unimplemented("StubRoutines::verify_oop", 95);  // TODO PPC port
 883     return start;
 884   }
 885 
 886   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
 887   //
  // The code was ported from SPARC because we believe it benefits JVM98; however,
  // tracing (-XX:+TraceOptimizeFill) shows that the intrinsic replacement doesn't happen at all!
 890   //
 891   // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
 892   // for turning on loop predication optimization, and hence the behavior of "array range check"
 893   // and "loop invariant check" could be influenced, which potentially boosted JVM98.
 894   //
 895   // Generate stub for disjoint short fill. If "aligned" is true, the
 896   // "to" address is assumed to be heapword aligned.
 897   //
 898   // Arguments for generated stub:
 899   //   to:    R3_ARG1
 900   //   value: R4_ARG2
 901   //   count: R5_ARG3 treated as signed
 902   //
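  // The core trick below is to replicate the fill value across a 64-bit
  // register and then store 8 (and 32) bytes at a time; as a sketch for the
  // T_BYTE case:
  //
  //   uint64_t v = value & 0xFF;
  //   v |= v << 8;    //  8 -> 16 bit
  //   v |= v << 16;   // 16 -> 32 bit
  //   v |= v << 32;   // 32 -> 64 bit
  //   // then: align 'to', store v in 32-byte chunks, handle the tail per element
  //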
 903   address generate_fill(BasicType t, bool aligned, const char* name) {
 904     StubCodeMark mark(this, "StubRoutines", name);
 905     address start = __ function_entry();
 906 
    const Register to    = R3_ARG1;   // destination array address (the array being filled)
 908     const Register value = R4_ARG2;   // fill value
 909     const Register count = R5_ARG3;   // elements count
 910     const Register temp  = R6_ARG4;   // temp register
 911 
 912     //assert_clean_int(count, O3);    // Make sure 'count' is clean int.
 913 
 914     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
 915     Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;
 916 
 917     int shift = -1;
 918     switch (t) {
 919        case T_BYTE:
 920         shift = 2;
 921         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
 922         __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
 923         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 924         __ blt(CCR0, L_fill_elements);
 925         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
 926         break;
 927        case T_SHORT:
 928         shift = 1;
 929         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
 930         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
 931         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 932         __ blt(CCR0, L_fill_elements);
 933         break;
 934       case T_INT:
 935         shift = 0;
 936         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
 937         __ blt(CCR0, L_fill_4_bytes);
 938         break;
 939       default: ShouldNotReachHere();
 940     }
 941 
 942     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
 943       // Align source address at 4 bytes address boundary.
 944       if (t == T_BYTE) {
 945         // One byte misalignment happens only for byte arrays.
 946         __ andi_(temp, to, 1);
 947         __ beq(CCR0, L_skip_align1);
 948         __ stb(value, 0, to);
 949         __ addi(to, to, 1);
 950         __ addi(count, count, -1);
 951         __ bind(L_skip_align1);
 952       }
 953       // Two bytes misalignment happens only for byte and short (char) arrays.
 954       __ andi_(temp, to, 2);
 955       __ beq(CCR0, L_skip_align2);
 956       __ sth(value, 0, to);
 957       __ addi(to, to, 2);
 958       __ addi(count, count, -(1 << (shift - 1)));
 959       __ bind(L_skip_align2);
 960     }
 961 
 962     if (!aligned) {
 963       // Align to 8 bytes, we know we are 4 byte aligned to start.
 964       __ andi_(temp, to, 7);
 965       __ beq(CCR0, L_fill_32_bytes);
 966       __ stw(value, 0, to);
 967       __ addi(to, to, 4);
 968       __ addi(count, count, -(1 << shift));
 969       __ bind(L_fill_32_bytes);
 970     }
 971 
 972     __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
 973     // Clone bytes int->long as above.
 974     __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit
 975 
 976     Label L_check_fill_8_bytes;
 977     // Fill 32-byte chunks.
 978     __ subf_(count, temp, count);
 979     __ blt(CCR0, L_check_fill_8_bytes);
 980 
 981     Label L_fill_32_bytes_loop;
 982     __ align(32);
 983     __ bind(L_fill_32_bytes_loop);
 984 
 985     __ std(value, 0, to);
 986     __ std(value, 8, to);
 987     __ subf_(count, temp, count);           // Update count.
 988     __ std(value, 16, to);
 989     __ std(value, 24, to);
 990 
 991     __ addi(to, to, 32);
 992     __ bge(CCR0, L_fill_32_bytes_loop);
 993 
 994     __ bind(L_check_fill_8_bytes);
 995     __ add_(count, temp, count);
 996     __ beq(CCR0, L_exit);
 997     __ addic_(count, count, -(2 << shift));
 998     __ blt(CCR0, L_fill_4_bytes);
 999 
1000     //
1001     // Length is too short, just fill 8 bytes at a time.
1002     //
1003     Label L_fill_8_bytes_loop;
1004     __ bind(L_fill_8_bytes_loop);
1005     __ std(value, 0, to);
1006     __ addic_(count, count, -(2 << shift));
1007     __ addi(to, to, 8);
1008     __ bge(CCR0, L_fill_8_bytes_loop);
1009 
1010     // Fill trailing 4 bytes.
1011     __ bind(L_fill_4_bytes);
1012     __ andi_(temp, count, 1<<shift);
1013     __ beq(CCR0, L_fill_2_bytes);
1014 
1015     __ stw(value, 0, to);
1016     if (t == T_BYTE || t == T_SHORT) {
1017       __ addi(to, to, 4);
1018       // Fill trailing 2 bytes.
1019       __ bind(L_fill_2_bytes);
1020       __ andi_(temp, count, 1<<(shift-1));
1021       __ beq(CCR0, L_fill_byte);
1022       __ sth(value, 0, to);
1023       if (t == T_BYTE) {
1024         __ addi(to, to, 2);
1025         // Fill trailing byte.
1026         __ bind(L_fill_byte);
1027         __ andi_(count, count, 1);
1028         __ beq(CCR0, L_exit);
1029         __ stb(value, 0, to);
1030       } else {
1031         __ bind(L_fill_byte);
1032       }
1033     } else {
1034       __ bind(L_fill_2_bytes);
1035     }
1036     __ bind(L_exit);
1037     __ blr();
1038 
1039     // Handle copies less than 8 bytes. Int is handled elsewhere.
1040     if (t == T_BYTE) {
1041       __ bind(L_fill_elements);
1042       Label L_fill_2, L_fill_4;
1043       __ andi_(temp, count, 1);
1044       __ beq(CCR0, L_fill_2);
1045       __ stb(value, 0, to);
1046       __ addi(to, to, 1);
1047       __ bind(L_fill_2);
1048       __ andi_(temp, count, 2);
1049       __ beq(CCR0, L_fill_4);
1050       __ stb(value, 0, to);
      __ stb(value, 1, to);
1052       __ addi(to, to, 2);
1053       __ bind(L_fill_4);
1054       __ andi_(temp, count, 4);
1055       __ beq(CCR0, L_exit);
1056       __ stb(value, 0, to);
1057       __ stb(value, 1, to);
1058       __ stb(value, 2, to);
1059       __ stb(value, 3, to);
1060       __ blr();
1061     }
1062 
1063     if (t == T_SHORT) {
1064       Label L_fill_2;
1065       __ bind(L_fill_elements);
1066       __ andi_(temp, count, 1);
1067       __ beq(CCR0, L_fill_2);
1068       __ sth(value, 0, to);
1069       __ addi(to, to, 2);
1070       __ bind(L_fill_2);
1071       __ andi_(temp, count, 2);
1072       __ beq(CCR0, L_exit);
1073       __ sth(value, 0, to);
1074       __ sth(value, 2, to);
1075       __ blr();
1076     }
1077     return start;
1078   }
1079 
1080   inline void assert_positive_int(Register count) {
1081 #ifdef ASSERT
1082     __ srdi_(R0, count, 31);
1083     __ asm_assert_eq("missing zero extend", 0xAFFE);
1084 #endif
1085   }
1086 
1087   // Generate overlap test for array copy stubs.
1088   //
1089   // Input:
1090   //   R3_ARG1    -  from
1091   //   R4_ARG2    -  to
1092   //   R5_ARG3    -  element count
1093   //
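  // The branch taken below corresponds to the negation of this predicate
  // (sketch; all comparisons are unsigned): a backward copy is only needed if
  // the regions overlap with the destination above the source.
  //
  //   bool overlap = (from < to) && ((to - from) < (count << log2_elem_size));
  //   if (!overlap) goto no_overlap_target;   // forward copy is safe
  //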
1094   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
1095     Register tmp1 = R6_ARG4;
1096     Register tmp2 = R7_ARG5;
1097 
1098     assert_positive_int(R5_ARG3);
1099 
1100     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
1101     __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
1102     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
1103     __ cmpld(CCR1, tmp1, tmp2);
1104     __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
1105     // Overlaps if Src before dst and distance smaller than size.
1106     // Branch to forward copy routine otherwise (within range of 32kB).
1107     __ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target);
1108 
1109     // need to copy backwards
1110   }
1111 
1112   // The guideline in the implementations of generate_disjoint_xxx_copy
1113   // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
1114   // single instructions, but to avoid alignment interrupts (see subsequent
  // comment). Furthermore, we try to minimize misaligned accesses, even
1116   // though they cause no alignment interrupt.
1117   //
1118   // In Big-Endian mode, the PowerPC architecture requires implementations to
1119   // handle automatically misaligned integer halfword and word accesses,
1120   // word-aligned integer doubleword accesses, and word-aligned floating-point
1121   // accesses. Other accesses may or may not generate an Alignment interrupt
1122   // depending on the implementation.
1123   // Alignment interrupt handling may require on the order of hundreds of cycles,
1124   // so every effort should be made to avoid misaligned memory values.
1125   //
1126   //
1127   // Generate stub for disjoint byte copy.  If "aligned" is true, the
1128   // "from" and "to" addresses are assumed to be heapword aligned.
1129   //
1130   // Arguments for generated stub:
1131   //      from:  R3_ARG1
1132   //      to:    R4_ARG2
1133   //      count: R5_ARG3 treated as signed
1134   //
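  // High-level structure of the stub (sketch only): short arrays fall through
  // to the 4-byte and single-byte loops; otherwise the pointers are aligned
  // and a 32-byte unrolled doubleword loop does the bulk of the work.
  //
  //   if (count > 17 && same alignment of from/to mod 4) {
  //     copy single bytes until 'from' is 4-byte aligned;
  //     copy one word if needed (and possible) to reach 8-byte alignment;
  //     copy 32 bytes per iteration while >= 32 elements remain;
  //   }
  //   copy 4 bytes at a time while >= 4 elements remain;
  //   copy the remaining 0..3 bytes one at a time;
  //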
1135   address generate_disjoint_byte_copy(bool aligned, const char * name) {
1136     StubCodeMark mark(this, "StubRoutines", name);
1137     address start = __ function_entry();
1138     assert_positive_int(R5_ARG3);
1139 
1140     Register tmp1 = R6_ARG4;
1141     Register tmp2 = R7_ARG5;
1142     Register tmp3 = R8_ARG6;
1143     Register tmp4 = R9_ARG7;
1144 
1145     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
1146 
1147     // Don't try anything fancy if arrays don't have many elements.
1148     __ li(tmp3, 0);
1149     __ cmpwi(CCR0, R5_ARG3, 17);
1150     __ ble(CCR0, l_6); // copy 4 at a time
1151 
1152     if (!aligned) {
1153       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1154       __ andi_(tmp1, tmp1, 3);
1155       __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
1156 
1157       // Copy elements if necessary to align to 4 bytes.
1158       __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
1159       __ andi_(tmp1, tmp1, 3);
1160       __ beq(CCR0, l_2);
1161 
1162       __ subf(R5_ARG3, tmp1, R5_ARG3);
1163       __ bind(l_9);
1164       __ lbz(tmp2, 0, R3_ARG1);
1165       __ addic_(tmp1, tmp1, -1);
1166       __ stb(tmp2, 0, R4_ARG2);
1167       __ addi(R3_ARG1, R3_ARG1, 1);
1168       __ addi(R4_ARG2, R4_ARG2, 1);
1169       __ bne(CCR0, l_9);
1170 
1171       __ bind(l_2);
1172     }
1173 
1174     // copy 8 elements at a time
1175     __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
1176     __ andi_(tmp1, tmp2, 7);
1177     __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8
1178 
1179     // copy a 2-element word if necessary to align to 8 bytes
1180     __ andi_(R0, R3_ARG1, 7);
1181     __ beq(CCR0, l_7);
1182 
1183     __ lwzx(tmp2, R3_ARG1, tmp3);
1184     __ addi(R5_ARG3, R5_ARG3, -4);
1185     __ stwx(tmp2, R4_ARG2, tmp3);
1186     { // FasterArrayCopy
1187       __ addi(R3_ARG1, R3_ARG1, 4);
1188       __ addi(R4_ARG2, R4_ARG2, 4);
1189     }
1190     __ bind(l_7);
1191 
1192     { // FasterArrayCopy
1193       __ cmpwi(CCR0, R5_ARG3, 31);
1194       __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain
1195 
1196       __ srdi(tmp1, R5_ARG3, 5);
1197       __ andi_(R5_ARG3, R5_ARG3, 31);
1198       __ mtctr(tmp1);
1199 
1200       __ bind(l_8);
1201       // Use unrolled version for mass copying (copy 32 elements a time)
1202       // Load feeding store gets zero latency on Power6, however not on Power5.
1203       // Therefore, the following sequence is made for the good of both.
1204       __ ld(tmp1, 0, R3_ARG1);
1205       __ ld(tmp2, 8, R3_ARG1);
1206       __ ld(tmp3, 16, R3_ARG1);
1207       __ ld(tmp4, 24, R3_ARG1);
1208       __ std(tmp1, 0, R4_ARG2);
1209       __ std(tmp2, 8, R4_ARG2);
1210       __ std(tmp3, 16, R4_ARG2);
1211       __ std(tmp4, 24, R4_ARG2);
1212       __ addi(R3_ARG1, R3_ARG1, 32);
1213       __ addi(R4_ARG2, R4_ARG2, 32);
1214       __ bdnz(l_8);
1215     }
1216 
1217     __ bind(l_6);
1218 
1219     // copy 4 elements at a time
1220     __ cmpwi(CCR0, R5_ARG3, 4);
1221     __ blt(CCR0, l_1);
1222     __ srdi(tmp1, R5_ARG3, 2);
1223     __ mtctr(tmp1); // is > 0
1224     __ andi_(R5_ARG3, R5_ARG3, 3);
1225 
1226     { // FasterArrayCopy
1227       __ addi(R3_ARG1, R3_ARG1, -4);
1228       __ addi(R4_ARG2, R4_ARG2, -4);
1229       __ bind(l_3);
1230       __ lwzu(tmp2, 4, R3_ARG1);
1231       __ stwu(tmp2, 4, R4_ARG2);
1232       __ bdnz(l_3);
1233       __ addi(R3_ARG1, R3_ARG1, 4);
1234       __ addi(R4_ARG2, R4_ARG2, 4);
1235     }
1236 
1237     // do single element copy
1238     __ bind(l_1);
1239     __ cmpwi(CCR0, R5_ARG3, 0);
1240     __ beq(CCR0, l_4);
1241 
1242     { // FasterArrayCopy
1243       __ mtctr(R5_ARG3);
1244       __ addi(R3_ARG1, R3_ARG1, -1);
1245       __ addi(R4_ARG2, R4_ARG2, -1);
1246 
1247       __ bind(l_5);
1248       __ lbzu(tmp2, 1, R3_ARG1);
1249       __ stbu(tmp2, 1, R4_ARG2);
1250       __ bdnz(l_5);
1251     }
1252 
1253     __ bind(l_4);
1254     __ li(R3_RET, 0); // return 0
1255     __ blr();
1256 
1257     return start;
1258   }
1259 
1260   // Generate stub for conjoint byte copy.  If "aligned" is true, the
1261   // "from" and "to" addresses are assumed to be heapword aligned.
1262   //
1263   // Arguments for generated stub:
1264   //      from:  R3_ARG1
1265   //      to:    R4_ARG2
1266   //      count: R5_ARG3 treated as signed
1267   //
1268   address generate_conjoint_byte_copy(bool aligned, const char * name) {
1269     StubCodeMark mark(this, "StubRoutines", name);
1270     address start = __ function_entry();
1271     assert_positive_int(R5_ARG3);
1272 
1273     Register tmp1 = R6_ARG4;
1274     Register tmp2 = R7_ARG5;
1275     Register tmp3 = R8_ARG6;
1276 
1277     address nooverlap_target = aligned ?
1278       STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
1279       STUB_ENTRY(jbyte_disjoint_arraycopy);
1280 
1281     array_overlap_test(nooverlap_target, 0);
1282     // Do reverse copy. We assume the case of actual overlap is rare enough
1283     // that we don't have to optimize it.
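    // The reverse copy below is the straightforward byte loop (sketch):
    //
    //   for (ssize_t i = count - 1; i >= 0; i--) {
    //     to[i] = from[i];
    //   }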
1284     Label l_1, l_2;
1285 
1286     __ b(l_2);
1287     __ bind(l_1);
1288     __ stbx(tmp1, R4_ARG2, R5_ARG3);
1289     __ bind(l_2);
1290     __ addic_(R5_ARG3, R5_ARG3, -1);
1291     __ lbzx(tmp1, R3_ARG1, R5_ARG3);
1292     __ bge(CCR0, l_1);
1293 
1294     __ li(R3_RET, 0); // return 0
1295     __ blr();
1296 
1297     return start;
1298   }
1299 
1300   // Generate stub for disjoint short copy.  If "aligned" is true, the
1301   // "from" and "to" addresses are assumed to be heapword aligned.
1302   //
1303   // Arguments for generated stub:
1304   //      from:  R3_ARG1
1305   //      to:    R4_ARG2
1306   //  elm.count: R5_ARG3 treated as signed
1307   //
1308   // Strategy for aligned==true:
1309   //
1310   //  If length <= 9:
1311   //     1. copy 2 elements at a time (l_6)
1312   //     2. copy last element if original element count was odd (l_1)
1313   //
1314   //  If length > 9:
1315   //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
1316   //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
1317   //     3. copy last element if one was left in step 2. (l_1)
1318   //
1319   //
1320   // Strategy for aligned==false:
1321   //
1322   //  If length <= 9: same as aligned==true case, but NOTE: load/stores
1323   //                  can be unaligned (see comment below)
1324   //
1325   //  If length > 9:
1326   //     1. continue with step 6. if the alignment of from and to mod 4
1327   //        is different.
1328   //     2. align from and to to 4 bytes by copying 1 element if necessary
1329   //     3. at l_2 from and to are 4 byte aligned; continue with
1330   //        5. if they cannot be aligned to 8 bytes because they have
1331   //        got different alignment mod 8.
1332   //     4. at this point we know that both, from and to, have the same
1333   //        alignment mod 8, now copy one element if necessary to get
1334   //        8 byte alignment of from and to.
1335   //     5. copy 4 elements at a time until less than 4 elements are
1336   //        left; depending on step 3. all load/stores are aligned or
1337   //        either all loads or all stores are unaligned.
1338   //     6. copy 2 elements at a time until less than 2 elements are
1339   //        left (l_6); arriving here from step 1., there is a chance
1340   //        that all accesses are unaligned.
1341   //     7. copy last element if one was left in step 6. (l_1)
1342   //
1343   //  There are unaligned data accesses using integer load/store
1344   //  instructions in this stub. POWER allows such accesses.
1345   //
1346   //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
1347   //  Chapter 2: Effect of Operand Placement on Performance) unaligned
1348   //  integer load/stores have good performance. Only unaligned
1349   //  floating point load/stores can have poor performance.
1350   //
1351   //  TODO:
1352   //
1353   //  1. check if aligning the backbranch target of loops is beneficial
1354   //
1355   address generate_disjoint_short_copy(bool aligned, const char * name) {
1356     StubCodeMark mark(this, "StubRoutines", name);
1357 
1358     Register tmp1 = R6_ARG4;
1359     Register tmp2 = R7_ARG5;
1360     Register tmp3 = R8_ARG6;
1361     Register tmp4 = R9_ARG7;
1362 
1363     address start = __ function_entry();
1364     assert_positive_int(R5_ARG3);
1365 
1366       Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
1367 
1368     // don't try anything fancy if arrays don't have many elements
1369     __ li(tmp3, 0);
1370     __ cmpwi(CCR0, R5_ARG3, 9);
1371     __ ble(CCR0, l_6); // copy 2 at a time
1372 
1373     if (!aligned) {
1374       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1375       __ andi_(tmp1, tmp1, 3);
1376       __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
1377 
1378       // At this point it is guaranteed that both, from and to have the same alignment mod 4.
1379 
1380       // Copy 1 element if necessary to align to 4 bytes.
1381       __ andi_(tmp1, R3_ARG1, 3);
1382       __ beq(CCR0, l_2);
1383 
1384       __ lhz(tmp2, 0, R3_ARG1);
1385       __ addi(R3_ARG1, R3_ARG1, 2);
1386       __ sth(tmp2, 0, R4_ARG2);
1387       __ addi(R4_ARG2, R4_ARG2, 2);
1388       __ addi(R5_ARG3, R5_ARG3, -1);
1389       __ bind(l_2);
1390 
1391       // At this point the positions of both, from and to, are at least 4 byte aligned.
1392 
1393       // Copy 4 elements at a time.
1394       // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
1395       __ xorr(tmp2, R3_ARG1, R4_ARG2);
1396       __ andi_(tmp1, tmp2, 7);
1397       __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
1398 
1399       // Copy a 2-element word if necessary to align to 8 bytes.
1400       __ andi_(R0, R3_ARG1, 7);
1401       __ beq(CCR0, l_7);
1402 
1403       __ lwzx(tmp2, R3_ARG1, tmp3);
1404       __ addi(R5_ARG3, R5_ARG3, -2);
1405       __ stwx(tmp2, R4_ARG2, tmp3);
1406       { // FasterArrayCopy
1407         __ addi(R3_ARG1, R3_ARG1, 4);
1408         __ addi(R4_ARG2, R4_ARG2, 4);
1409       }
1410     }
1411 
1412     __ bind(l_7);
1413 
1414     // Copy 4 elements at a time; either the loads or the stores can
1415     // be unaligned if aligned == false.
1416 
1417     { // FasterArrayCopy
1418       __ cmpwi(CCR0, R5_ARG3, 15);
1419       __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
1420 
1421       __ srdi(tmp1, R5_ARG3, 4);
1422       __ andi_(R5_ARG3, R5_ARG3, 15);
1423       __ mtctr(tmp1);
1424 
1425       __ bind(l_8);
1426       // Use unrolled version for mass copying (copy 16 elements at a time).
1427       // Load feeding store gets zero latency on Power6, but not on Power5.
1428       // Therefore, the following sequence is made for the good of both.
1429       __ ld(tmp1, 0, R3_ARG1);
1430       __ ld(tmp2, 8, R3_ARG1);
1431       __ ld(tmp3, 16, R3_ARG1);
1432       __ ld(tmp4, 24, R3_ARG1);
1433       __ std(tmp1, 0, R4_ARG2);
1434       __ std(tmp2, 8, R4_ARG2);
1435       __ std(tmp3, 16, R4_ARG2);
1436       __ std(tmp4, 24, R4_ARG2);
1437       __ addi(R3_ARG1, R3_ARG1, 32);
1438       __ addi(R4_ARG2, R4_ARG2, 32);
1439       __ bdnz(l_8);
1440     }
1441     __ bind(l_6);
1442 
1443     // copy 2 elements at a time
1444     { // FasterArrayCopy
1445       __ cmpwi(CCR0, R5_ARG3, 2);
1446       __ blt(CCR0, l_1);
1447       __ srdi(tmp1, R5_ARG3, 1);
1448       __ andi_(R5_ARG3, R5_ARG3, 1);
1449 
1450       __ addi(R3_ARG1, R3_ARG1, -4);
1451       __ addi(R4_ARG2, R4_ARG2, -4);
1452       __ mtctr(tmp1);
1453 
1454       __ bind(l_3);
1455       __ lwzu(tmp2, 4, R3_ARG1);
1456       __ stwu(tmp2, 4, R4_ARG2);
1457       __ bdnz(l_3);
1458 
1459       __ addi(R3_ARG1, R3_ARG1, 4);
1460       __ addi(R4_ARG2, R4_ARG2, 4);
1461     }
1462 
1463     // do single element copy
1464     __ bind(l_1);
1465     __ cmpwi(CCR0, R5_ARG3, 0);
1466     __ beq(CCR0, l_4);
1467 
1468     { // FasterArrayCopy
1469       __ mtctr(R5_ARG3);
1470       __ addi(R3_ARG1, R3_ARG1, -2);
1471       __ addi(R4_ARG2, R4_ARG2, -2);
1472 
1473       __ bind(l_5);
1474       __ lhzu(tmp2, 2, R3_ARG1);
1475       __ sthu(tmp2, 2, R4_ARG2);
1476       __ bdnz(l_5);
1477     }
1478     __ bind(l_4);
1479     __ li(R3_RET, 0); // return 0
1480     __ blr();
1481 
1482     return start;
1483   }
1484 
1485   // Generate stub for conjoint short copy.  If "aligned" is true, the
1486   // "from" and "to" addresses are assumed to be heapword aligned.
1487   //
1488   // Arguments for generated stub:
1489   //      from:  R3_ARG1
1490   //      to:    R4_ARG2
1491   //      count: R5_ARG3 treated as signed
1492   //
1493   address generate_conjoint_short_copy(bool aligned, const char * name) {
1494     StubCodeMark mark(this, "StubRoutines", name);
1495     address start = __ function_entry();
1496     assert_positive_int(R5_ARG3);
1497 
1498     Register tmp1 = R6_ARG4;
1499     Register tmp2 = R7_ARG5;
1500     Register tmp3 = R8_ARG6;
1501 
1502     address nooverlap_target = aligned ?
1503       STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
1504       STUB_ENTRY(jshort_disjoint_arraycopy);
1505 
1506     array_overlap_test(nooverlap_target, 1);
1507 
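         // The loop below copies backwards (highest index first), which is what makes
         // an overlapping copy with 'to' above 'from' correct. Roughly:
         //
         //   for (long i = count - 1; i >= 0; i--) to[i] = from[i];
         //
         // tmp1 holds the byte offset 2 * i. Note that the load at l_2 is issued once
         // more after the offset has gone negative; the bge then falls through, so the
         // extra value is never stored.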
1508     Label l_1, l_2;
1509     __ sldi(tmp1, R5_ARG3, 1);
1510     __ b(l_2);
1511     __ bind(l_1);
1512     __ sthx(tmp2, R4_ARG2, tmp1);
1513     __ bind(l_2);
1514     __ addic_(tmp1, tmp1, -2);
1515     __ lhzx(tmp2, R3_ARG1, tmp1);
1516     __ bge(CCR0, l_1);
1517 
1518     __ li(R3_RET, 0); // return 0
1519     __ blr();
1520 
1521     return start;
1522   }
1523 
1524   // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
1525   // is true, the "from" and "to" addresses are assumed to be heapword aligned.
1526   //
1527   // Arguments:
1528   //      from:  R3_ARG1
1529   //      to:    R4_ARG2
1530   //      count: R5_ARG3 treated as signed
1531   //
1532   void generate_disjoint_int_copy_core(bool aligned) {
1533     Register tmp1 = R6_ARG4;
1534     Register tmp2 = R7_ARG5;
1535     Register tmp3 = R8_ARG6;
1536     Register tmp4 = R0;
1537 
1538     Label l_1, l_2, l_3, l_4, l_5, l_6;
1539 
1540     // for short arrays, just do single element copy
1541     __ li(tmp3, 0);
1542     __ cmpwi(CCR0, R5_ARG3, 5);
1543     __ ble(CCR0, l_2);
1544 
1545     if (!aligned) {
1546         // check if arrays have same alignment mod 8.
1547         __ xorr(tmp1, R3_ARG1, R4_ARG2);
1548         __ andi_(R0, tmp1, 7);
1549         // Not the same alignment mod 8, but ld and std cope with 4 byte alignment just fine.
1550         __ bne(CCR0, l_4); // different alignment mod 8 -> skip the 8 byte alignment step, go to the main loop
1551 
1552         // Copy 1 element to align 'to' and 'from' on an 8 byte boundary.
1553         __ andi_(R0, R3_ARG1, 7);
1554         __ beq(CCR0, l_4);
1555 
1556         __ lwzx(tmp2, R3_ARG1, tmp3);
1557         __ addi(R5_ARG3, R5_ARG3, -1);
1558         __ stwx(tmp2, R4_ARG2, tmp3);
1559         { // FasterArrayCopy
1560           __ addi(R3_ARG1, R3_ARG1, 4);
1561           __ addi(R4_ARG2, R4_ARG2, 4);
1562         }
1563         __ bind(l_4);
1564       }
1565 
1566     { // FasterArrayCopy
1567       __ cmpwi(CCR0, R5_ARG3, 7);
1568       __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1569 
1570       __ srdi(tmp1, R5_ARG3, 3);
1571       __ andi_(R5_ARG3, R5_ARG3, 7);
1572       __ mtctr(tmp1);
1573 
1574       __ bind(l_6);
1575       // Use unrolled version for mass copying (copy 8 elements at a time).
1576       // Load feeding store gets zero latency on Power6, but not on Power5.
1577       // Therefore, the following sequence is made for the good of both.
1578       __ ld(tmp1, 0, R3_ARG1);
1579       __ ld(tmp2, 8, R3_ARG1);
1580       __ ld(tmp3, 16, R3_ARG1);
1581       __ ld(tmp4, 24, R3_ARG1);
1582       __ std(tmp1, 0, R4_ARG2);
1583       __ std(tmp2, 8, R4_ARG2);
1584       __ std(tmp3, 16, R4_ARG2);
1585       __ std(tmp4, 24, R4_ARG2);
1586       __ addi(R3_ARG1, R3_ARG1, 32);
1587       __ addi(R4_ARG2, R4_ARG2, 32);
1588       __ bdnz(l_6);
1589     }
1590 
1591     // copy 1 element at a time
1592     __ bind(l_2);
1593     __ cmpwi(CCR0, R5_ARG3, 0);
1594     __ beq(CCR0, l_1);
1595 
1596     { // FasterArrayCopy
1597       __ mtctr(R5_ARG3);
1598       __ addi(R3_ARG1, R3_ARG1, -4);
1599       __ addi(R4_ARG2, R4_ARG2, -4);
1600 
1601       __ bind(l_3);
1602       __ lwzu(tmp2, 4, R3_ARG1);
1603       __ stwu(tmp2, 4, R4_ARG2);
1604       __ bdnz(l_3);
1605     }
1606 
1607     __ bind(l_1);
1608     return;
1609   }
1610 
1611   // Generate stub for disjoint int copy.  If "aligned" is true, the
1612   // "from" and "to" addresses are assumed to be heapword aligned.
1613   //
1614   // Arguments for generated stub:
1615   //      from:  R3_ARG1
1616   //      to:    R4_ARG2
1617   //      count: R5_ARG3 treated as signed
1618   //
1619   address generate_disjoint_int_copy(bool aligned, const char * name) {
1620     StubCodeMark mark(this, "StubRoutines", name);
1621     address start = __ function_entry();
1622     assert_positive_int(R5_ARG3);
1623     generate_disjoint_int_copy_core(aligned);
1624     __ li(R3_RET, 0); // return 0
1625     __ blr();
1626     return start;
1627   }
1628 
1629   // Generate core code for conjoint int copy (and oop copy on
1630   // 32-bit).  If "aligned" is true, the "from" and "to" addresses
1631   // are assumed to be heapword aligned.
1632   //
1633   // Arguments:
1634   //      from:  R3_ARG1
1635   //      to:    R4_ARG2
1636   //      count: R5_ARG3 treated as signed
1637   //
1638   void generate_conjoint_int_copy_core(bool aligned) {
1639     // Do reverse copy.  We assume the case of actual overlap is rare enough
1640     // that we don't have to optimize it.
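         // Reverse copy, roughly (illustrative only):
         //
         //   long i = count;
         //   while (i >= 8) { i -= 8; /* load 4 doublewords of from[i..i+7], then store them to to[i..i+7] */ }
         //   while (i-- > 0) to[i] = from[i];
         //
         // Issuing all four 8-byte loads before the stores keeps a small forward
         // overlap within one 32-byte chunk correct as well.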
1641 
1642     Label l_1, l_2, l_3, l_4, l_5, l_6;
1643 
1644     Register tmp1 = R6_ARG4;
1645     Register tmp2 = R7_ARG5;
1646     Register tmp3 = R8_ARG6;
1647     Register tmp4 = R0;
1648 
1649     { // FasterArrayCopy
1650       __ cmpwi(CCR0, R5_ARG3, 0);
1651       __ beq(CCR0, l_6);
1652 
1653       __ sldi(R5_ARG3, R5_ARG3, 2);
1654       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1655       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1656       __ srdi(R5_ARG3, R5_ARG3, 2);
1657 
1658       __ cmpwi(CCR0, R5_ARG3, 7);
1659       __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1660 
1661       __ srdi(tmp1, R5_ARG3, 3);
1662       __ andi(R5_ARG3, R5_ARG3, 7);
1663       __ mtctr(tmp1);
1664 
1665       __ bind(l_4);
1666       // Use unrolled version for mass copying (copy 8 elements at a time).
1667       // Load feeding store gets zero latency on Power6, but not on Power5.
1668       // Therefore, the following sequence is made for the good of both.
1669       __ addi(R3_ARG1, R3_ARG1, -32);
1670       __ addi(R4_ARG2, R4_ARG2, -32);
1671       __ ld(tmp4, 24, R3_ARG1);
1672       __ ld(tmp3, 16, R3_ARG1);
1673       __ ld(tmp2, 8, R3_ARG1);
1674       __ ld(tmp1, 0, R3_ARG1);
1675       __ std(tmp4, 24, R4_ARG2);
1676       __ std(tmp3, 16, R4_ARG2);
1677       __ std(tmp2, 8, R4_ARG2);
1678       __ std(tmp1, 0, R4_ARG2);
1679       __ bdnz(l_4);
1680 
1681       __ cmpwi(CCR0, R5_ARG3, 0);
1682       __ beq(CCR0, l_6);
1683 
1684       __ bind(l_5);
1685       __ mtctr(R5_ARG3);
1686       __ bind(l_3);
1687       __ lwz(R0, -4, R3_ARG1);
1688       __ stw(R0, -4, R4_ARG2);
1689       __ addi(R3_ARG1, R3_ARG1, -4);
1690       __ addi(R4_ARG2, R4_ARG2, -4);
1691       __ bdnz(l_3);
1692 
1693       __ bind(l_6);
1694     }
1695   }
1696 
1697   // Generate stub for conjoint int copy.  If "aligned" is true, the
1698   // "from" and "to" addresses are assumed to be heapword aligned.
1699   //
1700   // Arguments for generated stub:
1701   //      from:  R3_ARG1
1702   //      to:    R4_ARG2
1703   //      count: R5_ARG3 treated as signed
1704   //
1705   address generate_conjoint_int_copy(bool aligned, const char * name) {
1706     StubCodeMark mark(this, "StubRoutines", name);
1707     address start = __ function_entry();
1708     assert_positive_int(R5_ARG3);
1709     address nooverlap_target = aligned ?
1710       STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
1711       STUB_ENTRY(jint_disjoint_arraycopy);
1712 
1713     array_overlap_test(nooverlap_target, 2);
1714 
1715     generate_conjoint_int_copy_core(aligned);
1716 
1717     __ li(R3_RET, 0); // return 0
1718     __ blr();
1719 
1720     return start;
1721   }
1722 
1723   // Generate core code for disjoint long copy (and oop copy on
1724   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1725   // are assumed to be heapword aligned.
1726   //
1727   // Arguments:
1728   //      from:  R3_ARG1
1729   //      to:    R4_ARG2
1730   //      count: R5_ARG3 treated as signed
1731   //
1732   void generate_disjoint_long_copy_core(bool aligned) {
1733     Register tmp1 = R6_ARG4;
1734     Register tmp2 = R7_ARG5;
1735     Register tmp3 = R8_ARG6;
1736     Register tmp4 = R0;
1737 
1738     Label l_1, l_2, l_3, l_4;
1739 
1740     { // FasterArrayCopy
1741       __ cmpwi(CCR0, R5_ARG3, 3);
1742       __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1743 
1744       __ srdi(tmp1, R5_ARG3, 2);
1745       __ andi_(R5_ARG3, R5_ARG3, 3);
1746       __ mtctr(tmp1);
1747 
1748       __ bind(l_4);
1749       // Use unrolled version for mass copying (copy 4 elements at a time).
1750       // Load feeding store gets zero latency on Power6, but not on Power5.
1751       // Therefore, the following sequence is made for the good of both.
1752       __ ld(tmp1, 0, R3_ARG1);
1753       __ ld(tmp2, 8, R3_ARG1);
1754       __ ld(tmp3, 16, R3_ARG1);
1755       __ ld(tmp4, 24, R3_ARG1);
1756       __ std(tmp1, 0, R4_ARG2);
1757       __ std(tmp2, 8, R4_ARG2);
1758       __ std(tmp3, 16, R4_ARG2);
1759       __ std(tmp4, 24, R4_ARG2);
1760       __ addi(R3_ARG1, R3_ARG1, 32);
1761       __ addi(R4_ARG2, R4_ARG2, 32);
1762       __ bdnz(l_4);
1763     }
1764 
1765     // copy 1 element at a time
1766     __ bind(l_3);
1767     __ cmpwi(CCR0, R5_ARG3, 0);
1768     __ beq(CCR0, l_1);
1769 
1770     { // FasterArrayCopy
1771       __ mtctr(R5_ARG3);
1772       __ addi(R3_ARG1, R3_ARG1, -8);
1773       __ addi(R4_ARG2, R4_ARG2, -8);
1774 
1775       __ bind(l_2);
1776       __ ldu(R0, 8, R3_ARG1);
1777       __ stdu(R0, 8, R4_ARG2);
1778       __ bdnz(l_2);
1779 
1780     }
1781     __ bind(l_1);
1782   }
1783 
1784   // Generate stub for disjoint long copy.  If "aligned" is true, the
1785   // "from" and "to" addresses are assumed to be heapword aligned.
1786   //
1787   // Arguments for generated stub:
1788   //      from:  R3_ARG1
1789   //      to:    R4_ARG2
1790   //      count: R5_ARG3 treated as signed
1791   //
1792   address generate_disjoint_long_copy(bool aligned, const char * name) {
1793     StubCodeMark mark(this, "StubRoutines", name);
1794     address start = __ function_entry();
1795     assert_positive_int(R5_ARG3);
1796     generate_disjoint_long_copy_core(aligned);
1797     __ li(R3_RET, 0); // return 0
1798     __ blr();
1799 
1800     return start;
1801   }
1802 
1803   // Generate core code for conjoint long copy (and oop copy on
1804   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1805   // are assumed to be heapword aligned.
1806   //
1807   // Arguments:
1808   //      from:  R3_ARG1
1809   //      to:    R4_ARG2
1810   //      count: R5_ARG3 treated as signed
1811   //
1812   void generate_conjoint_long_copy_core(bool aligned) {
1813     Register tmp1 = R6_ARG4;
1814     Register tmp2 = R7_ARG5;
1815     Register tmp3 = R8_ARG6;
1816     Register tmp4 = R0;
1817 
1818     Label l_1, l_2, l_3, l_4, l_5;
1819 
1820     __ cmpwi(CCR0, R5_ARG3, 0);
1821     __ beq(CCR0, l_1);
1822 
1823     { // FasterArrayCopy
1824       __ sldi(R5_ARG3, R5_ARG3, 3);
1825       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1826       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1827       __ srdi(R5_ARG3, R5_ARG3, 3);
1828 
1829       __ cmpwi(CCR0, R5_ARG3, 3);
1830       __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
1831 
1832       __ srdi(tmp1, R5_ARG3, 2);
1833       __ andi(R5_ARG3, R5_ARG3, 3);
1834       __ mtctr(tmp1);
1835 
1836       __ bind(l_4);
1837       // Use unrolled version for mass copying (copy 4 elements at a time).
1838       // Load feeding store gets zero latency on Power6, but not on Power5.
1839       // Therefore, the following sequence is made for the good of both.
1840       __ addi(R3_ARG1, R3_ARG1, -32);
1841       __ addi(R4_ARG2, R4_ARG2, -32);
1842       __ ld(tmp4, 24, R3_ARG1);
1843       __ ld(tmp3, 16, R3_ARG1);
1844       __ ld(tmp2, 8, R3_ARG1);
1845       __ ld(tmp1, 0, R3_ARG1);
1846       __ std(tmp4, 24, R4_ARG2);
1847       __ std(tmp3, 16, R4_ARG2);
1848       __ std(tmp2, 8, R4_ARG2);
1849       __ std(tmp1, 0, R4_ARG2);
1850       __ bdnz(l_4);
1851 
1852       __ cmpwi(CCR0, R5_ARG3, 0);
1853       __ beq(CCR0, l_1);
1854 
1855       __ bind(l_5);
1856       __ mtctr(R5_ARG3);
1857       __ bind(l_3);
1858       __ ld(R0, -8, R3_ARG1);
1859       __ std(R0, -8, R4_ARG2);
1860       __ addi(R3_ARG1, R3_ARG1, -8);
1861       __ addi(R4_ARG2, R4_ARG2, -8);
1862       __ bdnz(l_3);
1863 
1864     }
1865     __ bind(l_1);
1866   }
1867 
1868   // Generate stub for conjoint long copy.  If "aligned" is true, the
1869   // "from" and "to" addresses are assumed to be heapword aligned.
1870   //
1871   // Arguments for generated stub:
1872   //      from:  R3_ARG1
1873   //      to:    R4_ARG2
1874   //      count: R5_ARG3 treated as signed
1875   //
1876   address generate_conjoint_long_copy(bool aligned, const char * name) {
1877     StubCodeMark mark(this, "StubRoutines", name);
1878     address start = __ function_entry();
1879     assert_positive_int(R5_ARG3);
1880     address nooverlap_target = aligned ?
1881       STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) :
1882       STUB_ENTRY(jlong_disjoint_arraycopy);
1883 
1884     array_overlap_test(nooverlap_target, 3);
1885     generate_conjoint_long_copy_core(aligned);
1886 
1887     __ li(R3_RET, 0); // return 0
1888     __ blr();
1889 
1890     return start;
1891   }
1892 
1893   // Generate stub for conjoint oop copy.  If "aligned" is true, the
1894   // "from" and "to" addresses are assumed to be heapword aligned.
1895   //
1896   // Arguments for generated stub:
1897   //      from:  R3_ARG1
1898   //      to:    R4_ARG2
1899   //      count: R5_ARG3 treated as signed
1900   //      dest_uninitialized: G1 support
1901   //
1902   address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1903     StubCodeMark mark(this, "StubRoutines", name);
1904 
1905     address start = __ function_entry();
1906     assert_positive_int(R5_ARG3);
1907     address nooverlap_target = aligned ?
1908       STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
1909       STUB_ENTRY(oop_disjoint_arraycopy);
1910 
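         // An oop copy is a plain int copy (compressed oops, 4 byte elements) or long
         // copy (8 byte elements), bracketed by the GC barriers: the pre barrier covers
         // the destination range before the copy, the post barrier (card marks) after it.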
1911     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1912 
1913     // Save arguments.
1914     __ mr(R9_ARG7, R4_ARG2);
1915     __ mr(R10_ARG8, R5_ARG3);
1916 
1917     if (UseCompressedOops) {
1918       array_overlap_test(nooverlap_target, 2);
1919       generate_conjoint_int_copy_core(aligned);
1920     } else {
1921       array_overlap_test(nooverlap_target, 3);
1922       generate_conjoint_long_copy_core(aligned);
1923     }
1924 
1925     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
1926     __ li(R3_RET, 0); // return 0
1927     __ blr();
1928     return start;
1929   }
1930 
1931   // Generate stub for disjoint oop copy.  If "aligned" is true, the
1932   // "from" and "to" addresses are assumed to be heapword aligned.
1933   //
1934   // Arguments for generated stub:
1935   //      from:  R3_ARG1
1936   //      to:    R4_ARG2
1937   //      count: R5_ARG3 treated as signed
1938   //      dest_uninitialized: G1 support
1939   //
1940   address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1941     StubCodeMark mark(this, "StubRoutines", name);
1942     address start = __ function_entry();
1943     assert_positive_int(R5_ARG3);
1944     gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1945 
1946     // Save some arguments; the copy core destroys them.
1947     // They are needed for the post barrier.
1948     __ mr(R9_ARG7, R4_ARG2);
1949     __ mr(R10_ARG8, R5_ARG3);
1950 
1951     if (UseCompressedOops) {
1952       generate_disjoint_int_copy_core(aligned);
1953     } else {
1954       generate_disjoint_long_copy_core(aligned);
1955     }
1956 
1957     gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
1958     __ li(R3_RET, 0); // return 0
1959     __ blr();
1960 
1961     return start;
1962   }
1963 
1964 
1965   // Helper for generating a dynamic type check.
1966   // Smashes only the given temp registers.
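       //
       // Conceptually (a simplified sketch of HotSpot's subtype check; the real code
       // in check_klass_subtype_fast_path/slow_path also maintains the secondary
       // super cache):
       //
       //   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass)
       //     goto L_success;                              // fast path
       //   else if (secondary supers of sub_klass contain super_klass)
       //     goto L_success;                              // slow path scan
       //   // otherwise fall through to L_miss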
1967   void generate_type_check(Register sub_klass,
1968                            Register super_check_offset,
1969                            Register super_klass,
1970                            Register temp,
1971                            Label& L_success) {
1972     assert_different_registers(sub_klass, super_check_offset, super_klass);
1973 
1974     BLOCK_COMMENT("type_check:");
1975 
1976     Label L_miss;
1977 
1978     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL,
1979                                      super_check_offset);
1980     __ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL);
1981 
1982     // Fall through on failure!
1983     __ bind(L_miss);
1984   }
1985 
1986 
1987   //  Generate stub for checked oop copy.
1988   //
1989   // Arguments for generated stub:
1990   //      from:  R3
1991   //      to:    R4
1992   //      count: R5 treated as signed
1993   //      ckoff: R6 (super_check_offset)
1994   //      ckval: R7 (super_klass)
1995   //      ret:   R3 zero for success; (-1^K) where K is partial transfer count
1996   //
1997   address generate_checkcast_copy(const char *name, bool dest_uninitialized) {
1998 
1999     const Register R3_from   = R3_ARG1;      // source array address
2000     const Register R4_to     = R4_ARG2;      // destination array address
2001     const Register R5_count  = R5_ARG3;      // elements count
2002     const Register R6_ckoff  = R6_ARG4;      // super_check_offset
2003     const Register R7_ckval  = R7_ARG5;      // super_klass
2004 
2005     const Register R8_offset = R8_ARG6;      // loop var, with stride wordSize
2006     const Register R9_remain = R9_ARG7;      // loop var, with stride -1
2007     const Register R10_oop   = R10_ARG8;     // actual oop copied
2008     const Register R11_klass = R11_scratch1; // oop._klass
2009     const Register R12_tmp   = R12_scratch2;
2010 
2011     const Register R2_minus1 = R2;
2012 
2013     //__ align(CodeEntryAlignment);
2014     StubCodeMark mark(this, "StubRoutines", name);
2015     address start = __ function_entry();
2016 
2017     // Assert that int is 64 bit sign extended and arrays are not conjoint.
2018 #ifdef ASSERT
2019     {
2020     assert_positive_int(R5_ARG3);
2021     const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2;
2022     Label no_overlap;
2023     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
2024     __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes
2025     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
2026     __ cmpld(CCR1, tmp1, tmp2);
2027     __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
2028     // The regions overlap if src is before dst and the distance is smaller than the size.
2029     // Otherwise branch to no_overlap and continue.
2030     __ blt(CCR0, no_overlap);
2031     __ stop("overlap in checkcast_copy", 0x9543);
2032     __ bind(no_overlap);
2033     }
2034 #endif
2035 
2036     gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval);
2037 
2038     //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
2039 
2040     Label load_element, store_element, store_null, success, do_card_marks;
2041     __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
2042     __ li(R8_offset, 0);                   // Offset from start of arrays.
2043     __ li(R2_minus1, -1);
2044     __ bne(CCR0, load_element);
2045 
2046     // Empty array: Nothing to do.
2047     __ li(R3_RET, 0);           // Return 0 on (trivial) success.
2048     __ blr();
2049 
2050     // ======== begin loop ========
2051     // (Entry is load_element.)
2052     __ align(OptoLoopAlignment);
2053     __ bind(store_element);
2054     if (UseCompressedOops) {
2055       __ encode_heap_oop_not_null(R10_oop);
2056       __ bind(store_null);
2057       __ stw(R10_oop, R8_offset, R4_to);
2058     } else {
2059       __ bind(store_null);
2060       __ std(R10_oop, R8_offset, R4_to);
2061     }
2062 
2063     __ addi(R8_offset, R8_offset, heapOopSize);   // Step to next offset.
2064     __ add_(R9_remain, R2_minus1, R9_remain);     // Decrement the count.
2065     __ beq(CCR0, success);
2066 
2067     // ======== loop entry is here ========
2068     __ bind(load_element);
2069     __ load_heap_oop(R10_oop, R8_offset, R3_from, &store_null);  // Load the oop.
2070 
2071     __ load_klass(R11_klass, R10_oop); // Query the object klass.
2072 
2073     generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
2074                         // Branch to this on success:
2075                         store_element);
2076     // ======== end loop ========
2077 
2078     // It was a real error; we must depend on the caller to finish the job.
2079     // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
2080     // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
2081     // and report their number to the caller.
2082     __ subf_(R5_count, R9_remain, R5_count);
2083     __ nand(R3_RET, R5_count, R5_count);   // report (-1^K) to caller
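         // nand(x, x) == ~x == -1 ^ x. Example: if K = 3 elements were copied before
         // the type check failed, R3_RET = ~3 = -4 and the caller recovers K as ~R3_RET.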
2084     __ bne(CCR0, do_card_marks);
2085     __ blr();
2086 
2087     __ bind(success);
2088     __ li(R3_RET, 0);
2089 
2090     __ bind(do_card_marks);
2091     // Store check on R4_to[0..R5_count-1].
2092     gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET);
2093     __ blr();
2094     return start;
2095   }
2096 
2097 
2098   //  Generate 'unsafe' array copy stub.
2099   //  Though just as safe as the other stubs, it takes an unscaled
2100   //  size_t argument instead of an element count.
2101   //
2102   // Arguments for generated stub:
2103   //      from:  R3
2104   //      to:    R4
2105   //      count: R5 byte count, treated as ssize_t, can be zero
2106   //
2107   // Examines the alignment of the operands and dispatches
2108   // to a long, int, short, or byte copy loop.
2109   //
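       // Roughly (illustrative only):
       //
       //   if      (((from | to | count) & (BytesPerLong  - 1)) == 0) { count >>= 3; goto long_copy;  }
       //   else if (((from | to | count) & (BytesPerInt   - 1)) == 0) { count >>= 2; goto int_copy;   }
       //   else if (((from | to | count) & (BytesPerShort - 1)) == 0) { count >>= 1; goto short_copy; }
       //   else                                                       {              goto byte_copy;  }
       //
       // i.e. the widest element size whose alignment is satisfied by both addresses
       // and by the byte count is chosen, and the byte count is scaled to elements.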
2110   address generate_unsafe_copy(const char* name,
2111                                address byte_copy_entry,
2112                                address short_copy_entry,
2113                                address int_copy_entry,
2114                                address long_copy_entry) {
2115 
2116     const Register R3_from   = R3_ARG1;      // source array address
2117     const Register R4_to     = R4_ARG2;      // destination array address
2118     const Register R5_count  = R5_ARG3;      // byte count (as long on PPC64); scaled to an element count below
2119 
2120     const Register R6_bits   = R6_ARG4;      // test copy of low bits
2121     const Register R7_tmp    = R7_ARG5;
2122 
2123     //__ align(CodeEntryAlignment);
2124     StubCodeMark mark(this, "StubRoutines", name);
2125     address start = __ function_entry();
2126 
2127     // Bump this on entry, not on exit:
2128     //inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp);
2129 
2130     Label short_copy, int_copy, long_copy;
2131 
2132     __ orr(R6_bits, R3_from, R4_to);
2133     __ orr(R6_bits, R6_bits, R5_count);
2134     __ andi_(R0, R6_bits, (BytesPerLong-1));
2135     __ beq(CCR0, long_copy);
2136 
2137     __ andi_(R0, R6_bits, (BytesPerInt-1));
2138     __ beq(CCR0, int_copy);
2139 
2140     __ andi_(R0, R6_bits, (BytesPerShort-1));
2141     __ beq(CCR0, short_copy);
2142 
2143     // byte_copy:
2144     __ b(byte_copy_entry);
2145 
2146     __ bind(short_copy);
2147     __ srwi(R5_count, R5_count, LogBytesPerShort);
2148     __ b(short_copy_entry);
2149 
2150     __ bind(int_copy);
2151     __ srwi(R5_count, R5_count, LogBytesPerInt);
2152     __ b(int_copy_entry);
2153 
2154     __ bind(long_copy);
2155     __ srwi(R5_count, R5_count, LogBytesPerLong);
2156     __ b(long_copy_entry);
2157 
2158     return start;
2159   }
2160 
2161 
2162   // Perform range checks on the proposed arraycopy.
2163   // Kills the two temps, but nothing else.
2164   // Note: the sign bits of src_pos and dst_pos have already been cleaned by the caller (extsw_).
2165   void arraycopy_range_checks(Register src,     // source array oop
2166                               Register src_pos, // source position
2167                               Register dst,     // destination array oop
2168                               Register dst_pos, // destination position
2169                               Register length,  // length of copy
2170                               Register temp1, Register temp2,
2171                               Label& L_failed) {
2172     BLOCK_COMMENT("arraycopy_range_checks:");
2173 
2174     const Register array_length = temp1;  // scratch
2175     const Register end_pos      = temp2;  // scratch
2176 
2177     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2178     __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src);
2179     __ add(end_pos, src_pos, length);  // src_pos + length
2180     __ cmpd(CCR0, end_pos, array_length);
2181     __ bgt(CCR0, L_failed);
2182 
2183     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2184     __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst);
2185     __ add(end_pos, dst_pos, length);  // dst_pos + length
2186     __ cmpd(CCR0, end_pos, array_length);
2187     __ bgt(CCR0, L_failed);
2188 
2189     BLOCK_COMMENT("arraycopy_range_checks done");
2190   }
2191 
2192 
2193   //
2194   //  Generate generic array copy stubs
2195   //
2196   //  Input:
2197   //    R3    -  src oop
2198   //    R4    -  src_pos
2199   //    R5    -  dst oop
2200   //    R6    -  dst_pos
2201   //    R7    -  element count
2202   //
2203   //  Output:
2204   //    R3 ==  0  -  success
2205   //    R3 == -1  -  need to call System.arraycopy
2206   //
2207   address generate_generic_copy(const char *name,
2208                                 address entry_jbyte_arraycopy,
2209                                 address entry_jshort_arraycopy,
2210                                 address entry_jint_arraycopy,
2211                                 address entry_oop_arraycopy,
2212                                 address entry_disjoint_oop_arraycopy,
2213                                 address entry_jlong_arraycopy,
2214                                 address entry_checkcast_arraycopy) {
2215     Label L_failed, L_objArray;
2216 
2217     // Input registers
2218     const Register src       = R3_ARG1;  // source array oop
2219     const Register src_pos   = R4_ARG2;  // source position
2220     const Register dst       = R5_ARG3;  // destination array oop
2221     const Register dst_pos   = R6_ARG4;  // destination position
2222     const Register length    = R7_ARG5;  // elements count
2223 
2224     // registers used as temp
2225     const Register src_klass = R8_ARG6;  // source array klass
2226     const Register dst_klass = R9_ARG7;  // destination array klass
2227     const Register lh        = R10_ARG8; // layout helper
2228     const Register temp      = R2;
2229 
2230     //__ align(CodeEntryAlignment);
2231     StubCodeMark mark(this, "StubRoutines", name);
2232     address start = __ function_entry();
2233 
2234     // Bump this on entry, not on exit:
2235     //inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp);
2236 
2237     // In principle, the int arguments could be dirty.
2238 
2239     //-----------------------------------------------------------------------
2240     // Assembler stubs will be used for this call to arraycopy
2241     // if the following conditions are met:
2242     //
2243     // (1) src and dst must not be null.
2244     // (2) src_pos must not be negative.
2245     // (3) dst_pos must not be negative.
2246     // (4) length  must not be negative.
2247     // (5) src klass and dst klass should be the same and not NULL.
2248     // (6) src and dst should be arrays.
2249     // (7) src_pos + length must not exceed length of src.
2250     // (8) dst_pos + length must not exceed length of dst.
2251     BLOCK_COMMENT("arraycopy initial argument checks");
2252 
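         // The extsw_/cror sequence below ORs the individual failure conditions into
         // CCR1.eq, i.e.:
         //
         //   if (src == NULL || src_pos < 0 || dst == NULL || dst_pos < 0 || length < 0)
         //     goto L_failed;   // return -1
         //
         // As a side effect, the extsw_ instructions sign-extend the possibly dirty
         // 32-bit int arguments src_pos, dst_pos and length to 64 bits.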
2253     __ cmpdi(CCR1, src, 0);      // if (src == NULL) return -1;
2254     __ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1;
2255     __ cmpdi(CCR5, dst, 0);      // if (dst == NULL) return -1;
2256     __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2257     __ extsw_(dst_pos, dst_pos); // if (dst_pos < 0) return -1;
2258     __ cror(CCR5, Assembler::equal, CCR0, Assembler::less);
2259     __ extsw_(length, length);   // if (length < 0) return -1;
2260     __ cror(CCR1, Assembler::equal, CCR5, Assembler::equal);
2261     __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2262     __ beq(CCR1, L_failed);
2263 
2264     BLOCK_COMMENT("arraycopy argument klass checks");
2265     __ load_klass(src_klass, src);
2266     __ load_klass(dst_klass, dst);
2267 
2268     // Load layout helper
2269     //
2270     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2271     // 32        30    24            16              8     2                 0
2272     //
2273     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2274     //
2275 
2276     int lh_offset = in_bytes(Klass::layout_helper_offset());
2277 
2278     // Load the 32-bit signed layout helper value.
2279     __ lwz(lh, lh_offset, src_klass);
2280 
2281     // Handle objArrays completely differently...
2282     jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2283     __ load_const_optimized(temp, objArray_lh, R0);
2284     __ cmpw(CCR0, lh, temp);
2285     __ beq(CCR0, L_objArray);
2286 
2287     __ cmpd(CCR5, src_klass, dst_klass);          // if (src->klass() != dst->klass()) return -1;
2288     __ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1;
2289 
2290     __ crnand(CCR5, Assembler::equal, CCR6, Assembler::less);
2291     __ beq(CCR5, L_failed);
2292 
2293     // At this point, it is known to be a typeArray (array_tag 0x3).
2294 #ifdef ASSERT
2295     { Label L;
2296       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2297       __ load_const_optimized(temp, lh_prim_tag_in_place, R0);
2298       __ cmpw(CCR0, lh, temp);
2299       __ bge(CCR0, L);
2300       __ stop("must be a primitive array");
2301       __ bind(L);
2302     }
2303 #endif
2304 
2305     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2306                            temp, dst_klass, L_failed);
2307 
2308     // TypeArrayKlass
2309     //
2310     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2311     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2312     //
2313 
2314     const Register offset = dst_klass;    // array offset
2315     const Register elsize = src_klass;    // log2 element size
2316 
2317     __ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1));
2318     __ andi(elsize, lh, Klass::_lh_log2_element_size_mask);
2319     __ add(src, offset, src);       // src array offset
2320     __ add(dst, offset, dst);       // dst array offset
2321 
2322     // The following registers must be set before the jump to the corresponding stub.
2323     const Register from     = R3_ARG1;  // source array address
2324     const Register to       = R4_ARG2;  // destination array address
2325     const Register count    = R5_ARG3;  // elements count
2326 
2327     // 'from', 'to', 'count' registers should be set in this order
2328     // since they are the same as 'src', 'src_pos', 'dst'.
2329 
2330     BLOCK_COMMENT("scale indexes to element size");
2331     __ sld(src_pos, src_pos, elsize);
2332     __ sld(dst_pos, dst_pos, elsize);
2333     __ add(from, src_pos, src);  // src_addr
2334     __ add(to, dst_pos, dst);    // dst_addr
2335     __ mr(count, length);        // length
2336 
2337     BLOCK_COMMENT("choose copy loop based on element size");
2338     // Using conditional branches with range 32kB.
2339     const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal);
2340     __ cmpwi(CCR0, elsize, 0);
2341     __ bc(bo, bi, entry_jbyte_arraycopy);
2342     __ cmpwi(CCR0, elsize, LogBytesPerShort);
2343     __ bc(bo, bi, entry_jshort_arraycopy);
2344     __ cmpwi(CCR0, elsize, LogBytesPerInt);
2345     __ bc(bo, bi, entry_jint_arraycopy);
2346 #ifdef ASSERT
2347     { Label L;
2348       __ cmpwi(CCR0, elsize, LogBytesPerLong);
2349       __ beq(CCR0, L);
2350       __ stop("must be long copy, but elsize is wrong");
2351       __ bind(L);
2352     }
2353 #endif
2354     __ b(entry_jlong_arraycopy);
2355 
2356     // ObjArrayKlass
2357   __ bind(L_objArray);
2358     // live at this point:  src_klass, dst_klass, src[_pos], dst[_pos], length
2359 
2360     Label L_disjoint_plain_copy, L_checkcast_copy;
2361     //  test array classes for subtyping
2362     __ cmpd(CCR0, src_klass, dst_klass);         // usual case is exact equality
2363     __ bne(CCR0, L_checkcast_copy);
2364 
2365     // Identically typed arrays can be copied without element-wise checks.
2366     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2367                            temp, lh, L_failed);
2368 
2369     __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2370     __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2371     __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2372     __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2373     __ add(from, src_pos, src);  // src_addr
2374     __ add(to, dst_pos, dst);    // dst_addr
2375     __ mr(count, length);        // length
2376     __ b(entry_oop_arraycopy);
2377 
2378   __ bind(L_checkcast_copy);
2379     // live at this point:  src_klass, dst_klass
2380     {
2381       // Before looking at dst.length, make sure dst is also an objArray.
2382       __ lwz(temp, lh_offset, dst_klass);
2383       __ cmpw(CCR0, lh, temp);
2384       __ bne(CCR0, L_failed);
2385 
2386       // It is safe to examine both src.length and dst.length.
2387       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2388                              temp, lh, L_failed);
2389 
2390       // Marshal the base address arguments now, freeing registers.
2391       __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2392       __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2393       __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2394       __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2395       __ add(from, src_pos, src);  // src_addr
2396       __ add(to, dst_pos, dst);    // dst_addr
2397       __ mr(count, length);        // length
2398 
2399       Register sco_temp = R6_ARG4;             // This register is free now.
2400       assert_different_registers(from, to, count, sco_temp,
2401                                  dst_klass, src_klass);
2402 
2403       // Generate the type check.
2404       int sco_offset = in_bytes(Klass::super_check_offset_offset());
2405       __ lwz(sco_temp, sco_offset, dst_klass);
2406       generate_type_check(src_klass, sco_temp, dst_klass,
2407                           temp, L_disjoint_plain_copy);
2408 
2409       // Fetch destination element klass from the ObjArrayKlass header.
2410       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2411 
2412       // The checkcast_copy loop needs two extra arguments:
2413       __ ld(R7_ARG5, ek_offset, dst_klass);   // dest elem klass
2414       __ lwz(R6_ARG4, sco_offset, R7_ARG5);   // sco of elem klass
2415       __ b(entry_checkcast_arraycopy);
2416     }
2417 
2418     __ bind(L_disjoint_plain_copy);
2419     __ b(entry_disjoint_oop_arraycopy);
2420 
2421   __ bind(L_failed);
2422     __ li(R3_RET, -1); // return -1
2423     __ blr();
2424     return start;
2425   }
2426 
2427 
2428   void generate_arraycopy_stubs() {
2429     // Note: the disjoint stubs must be generated first, some of
2430     // the conjoint stubs use them.
2431 
2432     // non-aligned disjoint versions
2433     StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2434     StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2435     StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
2436     StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
2437     StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
2438     StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
2439 
2440     // aligned disjoint versions
2441     StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
2442     StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
2443     StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
2444     StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
2445     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
2446     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
2447 
2448     // non-aligned conjoint versions
2449     StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2450     StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
2451     StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
2452     StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
2453     StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
2454     StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
2455 
2456     // aligned conjoint versions
2457     StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
2458     StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
2459     StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
2460     StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
2461     StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
2462     StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
2463 
2464     // special/generic versions
2465     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", false);
2466     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true);
2467 
2468     StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
2469                                                             STUB_ENTRY(jbyte_arraycopy),
2470                                                             STUB_ENTRY(jshort_arraycopy),
2471                                                             STUB_ENTRY(jint_arraycopy),
2472                                                             STUB_ENTRY(jlong_arraycopy));
2473     StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
2474                                                              STUB_ENTRY(jbyte_arraycopy),
2475                                                              STUB_ENTRY(jshort_arraycopy),
2476                                                              STUB_ENTRY(jint_arraycopy),
2477                                                              STUB_ENTRY(oop_arraycopy),
2478                                                              STUB_ENTRY(oop_disjoint_arraycopy),
2479                                                              STUB_ENTRY(jlong_arraycopy),
2480                                                              STUB_ENTRY(checkcast_arraycopy));
2481 
2482     // fill routines
2483     if (OptimizeFill) {
2484       StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
2485       StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
2486       StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
2487       StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
2488       StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2489       StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
2490     }
2491   }
2492 
2493   // Safefetch stubs.
2494   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
2495     // safefetch signatures:
2496     //   int      SafeFetch32(int*      adr, int      errValue);
2497     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
2498     //
2499     // arguments:
2500     //   R3_ARG1 = adr
2501     //   R4_ARG2 = errValue
2502     //
2503     // result:
2504     //   R3_RET  = *adr or errValue
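         //
         // Typical use from VM code is a probe that must not crash on a bad pointer,
         // e.g.  int v = SafeFetch32((int*) addr, -1);  // yields -1 if addr faults.
         // The signal handler recognizes a fault at *fault_pc and resumes execution
         // at *continuation_pc, where errValue (already in R4_ARG2) becomes the result.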
2505 
2506     StubCodeMark mark(this, "StubRoutines", name);
2507 
2508     // Entry point, pc or function descriptor.
2509     *entry = __ function_entry();
2510 
2511     // Load *adr into R4_ARG2, may fault.
2512     *fault_pc = __ pc();
2513     switch (size) {
2514       case 4:
2515         // int32_t, sign-extended
2516         __ lwa(R4_ARG2, 0, R3_ARG1);
2517         break;
2518       case 8:
2519         // int64_t
2520         __ ld(R4_ARG2, 0, R3_ARG1);
2521         break;
2522       default:
2523         ShouldNotReachHere();
2524     }
2525 
2526     // return errValue or *adr
2527     *continuation_pc = __ pc();
2528     __ mr(R3_RET, R4_ARG2);
2529     __ blr();
2530   }
2531 
2532   // Stub for BigInteger::multiplyToLen()
2533   //
2534   //  Arguments:
2535   //
2536   //  Input:
2537   //    R3 - x address
2538   //    R4 - x length
2539   //    R5 - y address
2540   //    R6 - y length
2541   //    R7 - z address
2542   //    R8 - z length
2543   //
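       //
       //  For orientation, the Java-level algorithm this stub intrinsifies
       //  (java.math.BigInteger::multiplyToLen, schoolbook multiplication of 32-bit
       //  limbs with the most significant limb first) looks roughly like:
       //
       //    julong carry = 0;                                     // first row: no accumulate
       //    for (int j = ylen - 1, k = xlen + ylen - 1; j >= 0; j--, k--) {
       //      julong p = (julong)(juint)y[j] * (juint)x[xlen - 1] + carry;
       //      z[k] = (jint)p;  carry = p >> 32;
       //    }
       //    z[xlen - 1] = (jint)carry;
       //    for (int i = xlen - 2; i >= 0; i--) {                 // remaining rows: accumulate
       //      carry = 0;
       //      for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
       //        julong p = (julong)(juint)y[j] * (juint)x[i] + (juint)z[k] + carry;
       //        z[k] = (jint)p;  carry = p >> 32;
       //      }
       //      z[i] = (jint)carry;
       //    }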
2544   address generate_multiplyToLen() {
2545 
2546     StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
2547 
2548     address start = __ function_entry();
2549 
2550     const Register x     = R3;
2551     const Register xlen  = R4;
2552     const Register y     = R5;
2553     const Register ylen  = R6;
2554     const Register z     = R7;
2555     const Register zlen  = R8;
2556 
2557     const Register tmp1  = R2; // TOC not used.
2558     const Register tmp2  = R9;
2559     const Register tmp3  = R10;
2560     const Register tmp4  = R11;
2561     const Register tmp5  = R12;
2562 
2563     // non-volatile regs
2564     const Register tmp6  = R31;
2565     const Register tmp7  = R30;
2566     const Register tmp8  = R29;
2567     const Register tmp9  = R28;
2568     const Register tmp10 = R27;
2569     const Register tmp11 = R26;
2570     const Register tmp12 = R25;
2571     const Register tmp13 = R24;
2572 
2573     BLOCK_COMMENT("Entry:");
2574 
2575     // C2 does not respect int to long conversion for stub calls.
2576     __ clrldi(xlen, xlen, 32);
2577     __ clrldi(ylen, ylen, 32);
2578     __ clrldi(zlen, zlen, 32);
2579 
2580     // Save non-volatile regs (frameless).
2581     int current_offs = 8;
2582     __ std(R24, -current_offs, R1_SP); current_offs += 8;
2583     __ std(R25, -current_offs, R1_SP); current_offs += 8;
2584     __ std(R26, -current_offs, R1_SP); current_offs += 8;
2585     __ std(R27, -current_offs, R1_SP); current_offs += 8;
2586     __ std(R28, -current_offs, R1_SP); current_offs += 8;
2587     __ std(R29, -current_offs, R1_SP); current_offs += 8;
2588     __ std(R30, -current_offs, R1_SP); current_offs += 8;
2589     __ std(R31, -current_offs, R1_SP);
2590 
2591     __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5,
2592                        tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13);
2593 
2594     // Restore non-volatile regs.
2595     current_offs = 8;
2596     __ ld(R24, -current_offs, R1_SP); current_offs += 8;
2597     __ ld(R25, -current_offs, R1_SP); current_offs += 8;
2598     __ ld(R26, -current_offs, R1_SP); current_offs += 8;
2599     __ ld(R27, -current_offs, R1_SP); current_offs += 8;
2600     __ ld(R28, -current_offs, R1_SP); current_offs += 8;
2601     __ ld(R29, -current_offs, R1_SP); current_offs += 8;
2602     __ ld(R30, -current_offs, R1_SP); current_offs += 8;
2603     __ ld(R31, -current_offs, R1_SP);
2604 
2605     __ blr();  // Return to caller.
2606 
2607     return start;
2608   }
2609 
2610   /**
2611    * Arguments:
2612    *
2613    * Inputs:
2614    *   R3_ARG1    - int   crc
2615    *   R4_ARG2    - byte* buf
2616    *   R5_ARG3    - int   length (of buffer)
2617    *
2618    * scratch:
2619    *   R6_ARG4    - crc table address
2620    *   R7_ARG5    - tmp1
2621    *   R8_ARG6    - tmp2
2622    *
2623    * Output:
2624    *   R3_RET     - int   crc result
2625    */
2626   // Compute CRC32 function.
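       // The table-driven kernel conceptually performs the standard reflected CRC-32
       // byte-at-a-time update (polynomial 0xEDB88320, as used by java.util.zip.CRC32);
       // one step of it looks like
       //
       //   crc = table[(crc ^ *data++) & 0xff] ^ ((juint)crc >> 8);
       //
       // The emitted code is an unrolled/widened variant of this, and the initial and
       // final bit inversions are handled by the kernel/caller as appropriate.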
2627   address generate_CRC32_updateBytes(const char* name) {
2628     __ align(CodeEntryAlignment);
2629     StubCodeMark mark(this, "StubRoutines", name);
2630     address start = __ function_entry();  // Remember stub start address (is rtn value).
2631 
2632     // arguments to kernel_crc32:
2633     Register       crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
2634     Register       data    = R4_ARG2;  // source byte array
2635     Register       dataLen = R5_ARG3;  // #bytes to process
2636     Register       table   = R6_ARG4;  // crc table address
2637 
2638     Register       t0      = R9;       // work reg for kernel* emitters
2639     Register       t1      = R10;      // work reg for kernel* emitters
2640     Register       t2      = R11;      // work reg for kernel* emitters
2641     Register       t3      = R12;      // work reg for kernel* emitters
2642 
2643     BLOCK_COMMENT("Stub body {");
2644     assert_different_registers(crc, data, dataLen, table);
2645 
2646     StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
2647 
2648     __ kernel_crc32_1byte(crc, data, dataLen, table, t0, t1, t2, t3);
2649 
2650     BLOCK_COMMENT("return");
2651     __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
2652     __ blr();
2653 
2654     BLOCK_COMMENT("} Stub body");
2655     return start;
2656   }
2657 
2658   // Initialization
2659   void generate_initial() {
2660     // Generates all stubs and initializes the entry points
2661 
2662     // Entry points that exist in all platforms.
2663     // Note: This is code that could be shared among different platforms - however the
2664     // benefit seems to be smaller than the disadvantage of having a
2665     // much more complicated generator structure. See also comment in
2666     // stubRoutines.hpp.
2667 
2668     StubRoutines::_forward_exception_entry          = generate_forward_exception();
2669     StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
2670     StubRoutines::_catch_exception_entry            = generate_catch_exception();
2671 
2672     // Build this early so it's available for the interpreter.
2673     StubRoutines::_throw_StackOverflowError_entry   =
2674       generate_throw_exception("StackOverflowError throw_exception",
2675                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
2676 
2677     // CRC32 Intrinsics.
2678     if (UseCRC32Intrinsics) {
2679       StubRoutines::_crc_table_adr    = (address)StubRoutines::ppc64::_crc_table;
2680       StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
2681     }
2682   }
2683 
2684   void generate_all() {
2685     // Generates all stubs and initializes the entry points
2686 
2687     // These entry points require SharedInfo::stack0 to be set up in
2688     // non-core builds
2689     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
2690     // Handle IncompatibleClassChangeError in itable stubs.
2691     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
2692     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
2693 
2694     StubRoutines::_handler_for_unsafe_access_entry         = generate_handler_for_unsafe_access();
2695 
2696     // support for verify_oop (must happen after universe_init)
2697     StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
2698 
2699     // arraycopy stubs used by compilers
2700     generate_arraycopy_stubs();
2701 
2702     if (UseAESIntrinsics) {
2703       guarantee(!UseAESIntrinsics, "not yet implemented.");
2704     }
2705 
2706     // Safefetch stubs.
2707     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
2708                                                        &StubRoutines::_safefetch32_fault_pc,
2709                                                        &StubRoutines::_safefetch32_continuation_pc);
2710     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
2711                                                        &StubRoutines::_safefetchN_fault_pc,
2712                                                        &StubRoutines::_safefetchN_continuation_pc);
2713 
2714 #ifdef COMPILER2
2715     if (UseMultiplyToLenIntrinsic) {
2716       StubRoutines::_multiplyToLen = generate_multiplyToLen();
2717     }
2718 #endif
2719 
2720     if (UseMontgomeryMultiplyIntrinsic) {
2721       StubRoutines::_montgomeryMultiply
2722         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
2723     }
2724     if (UseMontgomerySquareIntrinsic) {
2725       StubRoutines::_montgomerySquare
2726         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
2727     }
2728   }
2729 
2730  public:
2731   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2732     // replace the standard masm with a special one:
2733     _masm = new MacroAssembler(code);
2734     if (all) {
2735       generate_all();
2736     } else {
2737       generate_initial();
2738     }
2739   }
2740 };
2741 
2742 void StubGenerator_generate(CodeBuffer* code, bool all) {
2743   StubGenerator g(code, all);
2744 }