1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.hpp"
  27 #include "assembler_arm.inline.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetAssembler.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "nativeInst_arm.hpp"
  32 #include "oops/instanceOop.hpp"
  33 #include "oops/method.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/handles.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubCodeGenerator.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "utilities/align.hpp"
  43 #ifdef COMPILER2
  44 #include "opto/runtime.hpp"
  45 #endif
  46 
  47 // Declaration and definition of StubGenerator (no .hpp file).
  48 // For a more detailed description of the stub routine structure
  49 // see the comment in stubRoutines.hpp
  50 
  51 #define __ _masm->
  52 
  53 #ifdef PRODUCT
  54 #define BLOCK_COMMENT(str) /* nothing */
  55 #else
  56 #define BLOCK_COMMENT(str) __ block_comment(str)
  57 #endif
  58 
  59 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  60 
  61 // -------------------------------------------------------------------------------------------------------------------------
  62 // Stub Code definitions
  63 
  64 // Platform dependent parameters for array copy stubs
  65 
  66 // Note: we have observed large variations in microbenchmark behavior
  67 // from platform to platform, depending on the configuration.
  68 
  69 // Instead of adding a series of command line options (which
  70 // unfortunately would have to be defined in the shared files and cannot
  71 // appear only in the ARM port), the tested results are hard-coded here in
  72 // a set of configurations, selected by specifying 'ArmCopyPlatform'.
  73 
  74 // Currently, this 'platform' is hardcoded to a value that is a good
  75 // enough trade-off.  However, one can easily modify this file to test
  76 // the hard-coded configurations or create new ones. If the gain is
  77 // significant, we could decide to either add command line options or
  78 // add code to automatically choose a configuration.
  79 
  80 // see comments below for the various configurations created
  81 #define DEFAULT_ARRAYCOPY_CONFIG 0
  82 #define TEGRA2_ARRAYCOPY_CONFIG 1
  83 #define IMX515_ARRAYCOPY_CONFIG 2
  84 
  85 // Hard coded choices (XXX: could be changed to a command line option)
  86 #define ArmCopyPlatform DEFAULT_ARRAYCOPY_CONFIG
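// For example, to experiment with the settings tuned for Tegra2, one could
// rebuild with the define above changed to (illustrative only):
//   #define ArmCopyPlatform TEGRA2_ARRAYCOPY_CONFIG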
  87 
  88 #define ArmCopyCacheLineSize 32 // not worth optimizing to 64 according to measured gains
  89 
  90 // configuration for each kind of loop
  91 typedef struct {
  92   int pld_distance;       // prefetch distance (0 => no prefetch, <0 => prefetch before, with the absolute value as distance)
  93   bool split_ldm;         // if true, split each LDM into LDMs with fewer registers
  94   bool split_stm;         // if true, split each STM into STMs with fewer registers
  95 } arraycopy_loop_config;
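// Reading an entry such as {-256, true, true} below: pld_distance == -256
// selects the 'prefetch before' scheme with a 256-byte prefetch distance,
// and the two booleans request that the LDM and STM of each loop iteration
// be split into two smaller multi-register transfers.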
  96 
  97 // configuration for all loops
  98 typedef struct {
  99   // const char *description;
 100   arraycopy_loop_config forward_aligned;
 101   arraycopy_loop_config backward_aligned;
 102   arraycopy_loop_config forward_shifted;
 103   arraycopy_loop_config backward_shifted;
 104 } arraycopy_platform_config;
 105 
 106 // configured platforms
 107 static arraycopy_platform_config arraycopy_configurations[] = {
 108   // configuration parameters for arraycopy loops
 109 
 110   // Configurations were chosen based on manual analysis of benchmark
 111   // results, minimizing overhead with respect to best results on the
 112   // different test cases.
 113 
 114   // Prefetch before is always favored since it avoids dirtying the
 115   // cache uselessly for small copies. Code for prefetch after has
 116   // been kept in case the difference is significant for some
 117   // platforms but we might consider dropping it.
 118 
 119   // distance, ldm, stm
 120   {
 121     // default: tradeoff tegra2/imx515/nv-tegra2,
 122     // Notes on benchmarking:
 123     // - not far from optimal configuration on nv-tegra2
 124     // - within 5% of optimal configuration except for backward aligned on IMX
 125     // - up to 40% from optimal configuration for backward shifted and backward aligned for tegra2
 126     //   but still on par with the operating system copy
 127     {-256, true,  true  }, // forward aligned
 128     {-256, true,  true  }, // backward aligned
 129     {-256, false, false }, // forward shifted
 130     {-256, true,  true  } // backward shifted
 131   },
 132   {
 133     // configuration tuned on tegra2-4.
 134     // Warning: should not be used on nv-tegra2 !
 135     // Notes:
 136     // - prefetch after gives 40% gain on backward copies on tegra2-4,
 137     //   resulting in better numbers than the operating system
 138     //   copy. However, this can lead to a 300% loss on nv-tegra and has
 139     //   more impact on the cache (fetches further than what is
 140     //   copied). Use this configuration with care, in case it improves
 141     //   reference benchmarks.
 142     {-256, true,  true  }, // forward aligned
 143     {96,   false, false }, // backward aligned
 144     {-256, false, false }, // forward shifted
 145     {96,   false, false } // backward shifted
 146   },
 147   {
 148     // configuration tuned on imx515
 149     // Notes:
 150     // - a smaller prefetch distance is sufficient to get good results and might be more stable
 151     // - refined backward aligned options within 5% of optimal configuration except for
 152     //   tests where the arrays fit in the cache
 153     {-160, false, false }, // forward aligned
 154     {-160, false, false }, // backward aligned
 155     {-160, false, false }, // forward shifted
 156     {-160, true,  true  } // backward shifted
 157   }
 158 };
 159 
 160 class StubGenerator: public StubCodeGenerator {
 161 
 162 #ifdef PRODUCT
 163 #define inc_counter_np(a,b,c) ((void)0)
 164 #else
 165 #define inc_counter_np(counter, t1, t2) \
 166   BLOCK_COMMENT("inc_counter " #counter); \
 167   __ inc_counter(&counter, t1, t2);
 168 #endif
 169 
 170  private:
 171 
 172   address generate_call_stub(address& return_address) {
 173     StubCodeMark mark(this, "StubRoutines", "call_stub");
 174     address start = __ pc();
 175 
 176 
 177     assert(frame::entry_frame_call_wrapper_offset == 0, "adjust this code");
 178 
 179     __ mov(Rtemp, SP);
 180     __ push(RegisterSet(FP) | RegisterSet(LR));
 181 #ifndef __SOFTFP__
 182     __ fstmdbd(SP, FloatRegisterSet(D8, 8), writeback);
 183 #endif
 184     __ stmdb(SP, RegisterSet(R0, R2) | RegisterSet(R4, R6) | RegisterSet(R8, R10) | altFP_7_11, writeback);
 185     __ mov(Rmethod, R3);
 186     __ ldmia(Rtemp, RegisterSet(R1, R3) | Rthread); // stacked arguments
 187 
 188     // XXX: TODO
 189     // Would be better with respect to native tools if the following
 190     // setting of FP was changed to conform to the native ABI, with FP
 191     // pointing to the saved FP slot (and the corresponding modifications
 192     // for entry_frame_call_wrapper_offset and frame::real_fp).
 193     __ mov(FP, SP);
 194 
 195     {
 196       Label no_parameters, pass_parameters;
 197       __ cmp(R3, 0);
 198       __ b(no_parameters, eq);
 199 
 200       __ bind(pass_parameters);
 201       __ ldr(Rtemp, Address(R2, wordSize, post_indexed)); // Rtemp OK, unused and scratchable
 202       __ subs(R3, R3, 1);
 203       __ push(Rtemp);
 204       __ b(pass_parameters, ne);
 205       __ bind(no_parameters);
 206     }
 207 
 208     __ mov(Rsender_sp, SP);
 209     __ blx(R1);
 210     return_address = __ pc();
 211 
 212     __ add(SP, FP, wordSize); // Skip link to JavaCallWrapper
 213     __ pop(RegisterSet(R2, R3));
 214 #ifndef __ABI_HARD__
 215     __ cmp(R3, T_LONG);
 216     __ cmp(R3, T_DOUBLE, ne);
 217     __ str(R0, Address(R2));
 218     __ str(R1, Address(R2, wordSize), eq);
 219 #else
 220     Label cont, l_float, l_double;
 221 
 222     __ cmp(R3, T_DOUBLE);
 223     __ b(l_double, eq);
 224 
 225     __ cmp(R3, T_FLOAT);
 226     __ b(l_float, eq);
 227 
 228     __ cmp(R3, T_LONG);
 229     __ str(R0, Address(R2));
 230     __ str(R1, Address(R2, wordSize), eq);
 231     __ b(cont);
 232 
 233 
 234     __ bind(l_double);
 235     __ fstd(D0, Address(R2));
 236     __ b(cont);
 237 
 238     __ bind(l_float);
 239     __ fsts(S0, Address(R2));
 240 
 241     __ bind(cont);
 242 #endif
 243 
 244     __ pop(RegisterSet(R4, R6) | RegisterSet(R8, R10) | altFP_7_11);
 245 #ifndef __SOFTFP__
 246     __ fldmiad(SP, FloatRegisterSet(D8, 8), writeback);
 247 #endif
 248     __ pop(RegisterSet(FP) | RegisterSet(PC));
 249 
 250     return start;
 251   }
 252 
 253 
 254   // (in) Rexception_obj: exception oop
 255   address generate_catch_exception() {
 256     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 257     address start = __ pc();
 258 
 259     __ str(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
 260     __ b(StubRoutines::_call_stub_return_address);
 261 
 262     return start;
 263   }
 264 
 265 
 266   // (in) Rexception_pc: return address
 267   address generate_forward_exception() {
 268     StubCodeMark mark(this, "StubRoutines", "forward exception");
 269     address start = __ pc();
 270 
 271     __ mov(c_rarg0, Rthread);
 272     __ mov(c_rarg1, Rexception_pc);
 273     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
 274                          SharedRuntime::exception_handler_for_return_address),
 275                          c_rarg0, c_rarg1);
 276     __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
 277     const Register Rzero = __ zero_register(Rtemp); // Rtemp OK (cleared by above call)
 278     __ str(Rzero, Address(Rthread, Thread::pending_exception_offset()));
 279 
 280 #ifdef ASSERT
 281     // make sure exception is set
 282     { Label L;
 283       __ cbnz(Rexception_obj, L);
 284       __ stop("StubRoutines::forward exception: no pending exception (2)");
 285       __ bind(L);
 286     }
 287 #endif
 288 
 289     // Verify that there is really a valid exception in Rexception_obj.
 290     __ verify_oop(Rexception_obj);
 291 
 292     __ jump(R0); // handler is returned in R0 by runtime function
 293     return start;
 294   }
 295 
 296 
 297 
 298   // Integer division shared routine
 299   //   Input:
 300   //     R0  - dividend
 301   //     R2  - divisor
 302   //   Output:
 303   //     R0  - remainder
 304   //     R1  - quotient
 305   //   Destroys:
 306   //     R2
 307   //     LR
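  // Implementation sketch (a summary of the code below): the positive-operand
  // path performs classic shift-and-subtract division; CLZ on both operands
  // approximates the order of the quotient, and the code jumps through a
  // 32-entry address table into the matching step of a fully unrolled
  // compare/subtract loop. Negative or zero operands are dispatched to small
  // fixed-size handler blocks, with divisor == 0 falling through to the
  // implicit-exception slow path.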
 308   address generate_idiv_irem() {
 309     Label positive_arguments, negative_or_zero, call_slow_path;
 310     Register dividend  = R0;
 311     Register divisor   = R2;
 312     Register remainder = R0;
 313     Register quotient  = R1;
 314     Register tmp       = LR;
 315     assert(dividend == remainder, "must be");
 316 
 317     address start = __ pc();
 318 
 319     // Check for special cases: divisor <= 0 or dividend < 0
 320     __ cmp(divisor, 0);
 321     __ orrs(quotient, dividend, divisor, ne);
 322     __ b(negative_or_zero, le);
 323 
 324     __ bind(positive_arguments);
 325     // Save return address on stack to free one extra register
 326     __ push(LR);
 327     // Approximate the maximum order of the quotient
 328     __ clz(tmp, dividend);
 329     __ clz(quotient, divisor);
 330     __ subs(tmp, quotient, tmp);
 331     __ mov(quotient, 0);
 332     // Jump to the appropriate place in the unrolled loop below
 333     __ ldr(PC, Address(PC, tmp, lsl, 2), pl);
 334     // If divisor is greater than dividend, return immediately
 335     __ pop(PC);
 336 
 337     // Offset table
 338     Label offset_table[32];
 339     int i;
 340     for (i = 0; i <= 31; i++) {
 341       __ emit_address(offset_table[i]);
 342     }
 343 
 344     // Unrolled loop of 32 division steps
 345     for (i = 31; i >= 0; i--) {
 346       __ bind(offset_table[i]);
 347       __ cmp(remainder, AsmOperand(divisor, lsl, i));
 348       __ sub(remainder, remainder, AsmOperand(divisor, lsl, i), hs);
 349       __ add(quotient, quotient, 1 << i, hs);
 350     }
 351     __ pop(PC);
 352 
 353     __ bind(negative_or_zero);
 354     // Find the combination of argument signs and jump to corresponding handler
 355     __ andr(quotient, dividend, 0x80000000, ne);
 356     __ orr(quotient, quotient, AsmOperand(divisor, lsr, 31), ne);
 357     __ add(PC, PC, AsmOperand(quotient, ror, 26), ne);
 358     __ str(LR, Address(Rthread, JavaThread::saved_exception_pc_offset()));
 359 
 360     // The leaf runtime function can destroy R0-R3 and R12 registers which are still alive
 361     RegisterSet saved_registers = RegisterSet(R3) | RegisterSet(R12);
 362 #if R9_IS_SCRATCHED
 363     // Safer to save R9 here since callers may have been written
 364     // assuming R9 survives. This is suboptimal but may not be worth
 365     // revisiting for this slow case.
 366 
 367     // save also R10 for alignment
 368     saved_registers = saved_registers | RegisterSet(R9, R10);
 369 #endif
 370     {
 371       // divisor == 0
 372       FixedSizeCodeBlock zero_divisor(_masm, 8, true);
 373       __ push(saved_registers);
 374       __ mov(R0, Rthread);
 375       __ mov(R1, LR);
 376       __ mov(R2, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
 377       __ b(call_slow_path);
 378     }
 379 
 380     {
 381       // divisor > 0 && dividend < 0
 382       FixedSizeCodeBlock positive_divisor_negative_dividend(_masm, 8, true);
 383       __ push(LR);
 384       __ rsb(dividend, dividend, 0);
 385       __ bl(positive_arguments);
 386       __ rsb(remainder, remainder, 0);
 387       __ rsb(quotient, quotient, 0);
 388       __ pop(PC);
 389     }
 390 
 391     {
 392       // divisor < 0 && dividend > 0
 393       FixedSizeCodeBlock negative_divisor_positive_dividend(_masm, 8, true);
 394       __ push(LR);
 395       __ rsb(divisor, divisor, 0);
 396       __ bl(positive_arguments);
 397       __ rsb(quotient, quotient, 0);
 398       __ pop(PC);
 399     }
 400 
 401     {
 402       // divisor < 0 && dividend < 0
 403       FixedSizeCodeBlock negative_divisor_negative_dividend(_masm, 8, true);
 404       __ push(LR);
 405       __ rsb(dividend, dividend, 0);
 406       __ rsb(divisor, divisor, 0);
 407       __ bl(positive_arguments);
 408       __ rsb(remainder, remainder, 0);
 409       __ pop(PC);
 410     }
 411 
 412     __ bind(call_slow_path);
 413     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::continuation_for_implicit_exception));
 414     __ pop(saved_registers);
 415     __ bx(R0);
 416 
 417     return start;
 418   }
 419 
 420 
 421  // As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as:
 422  //  <fence>; <op>; <membar StoreLoad|StoreStore>
 423  // But for load-linked/store-conditional based systems a fence here simply means
 424  // no load/store can be reordered with respect to the initial load-linked, so we have:
 425  // <membar storeload|loadload> ; load-linked; <op>; store-conditional; <membar storeload|storestore>
 426  // There are no memory actions in <op> so nothing further is needed.
 427  //
 428  // So we define the following for convenience:
 429 #define MEMBAR_ATOMIC_OP_PRE \
 430     MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::LoadLoad)
 431 #define MEMBAR_ATOMIC_OP_POST \
 432     MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::StoreStore)
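  // With these macros, the LL/SC-based stubs below (e.g. atomic_add and
  // atomic_xchg) follow the pattern sketched here:
  //
  //   membar(MEMBAR_ATOMIC_OP_PRE, tmp);
  //   retry: ldrex ...; <op>; strex ok, ...; cmp ok, 0; bne retry;
  //   membar(MEMBAR_ATOMIC_OP_POST, tmp);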
 433 
 434   // Note: JDK 9 only supports ARMv7+ so we always have ldrexd available even though the
 435   // code below allows for it to be otherwise. The else clause indicates an ARMv5 system
 436   // for which we do not support MP and so membars are not necessary. This ARMv5 code will
 437   // be removed in the future.
 438 
 439   // Support for jint Atomic::add(jint add_value, volatile jint *dest)
 440   //
 441   // Arguments :
 442   //
 443   //      add_value:      R0
 444   //      dest:           R1
 445   //
 446   // Results:
 447   //
 448   //     R0: the new value stored in dest
 449   //
 450   // Overwrites:
 451   //
 452   //     R1, R2, R3
 453   //
 454   address generate_atomic_add() {
 455     address start;
 456 
 457     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 458     Label retry;
 459     start = __ pc();
 460     Register addval    = R0;
 461     Register dest      = R1;
 462     Register prev      = R2;
 463     Register ok        = R2;
 464     Register newval    = R3;
 465 
 466     if (VM_Version::supports_ldrex()) {
 467       __ membar(MEMBAR_ATOMIC_OP_PRE, prev);
 468       __ bind(retry);
 469       __ ldrex(newval, Address(dest));
 470       __ add(newval, addval, newval);
 471       __ strex(ok, newval, Address(dest));
 472       __ cmp(ok, 0);
 473       __ b(retry, ne);
 474       __ mov (R0, newval);
 475       __ membar(MEMBAR_ATOMIC_OP_POST, prev);
 476     } else {
 477       __ bind(retry);
 478       __ ldr (prev, Address(dest));
 479       __ add(newval, addval, prev);
 480       __ atomic_cas_bool(prev, newval, dest, 0, noreg/*ignored*/);
 481       __ b(retry, ne);
 482       __ mov (R0, newval);
 483     }
 484     __ bx(LR);
 485 
 486     return start;
 487   }
 488 
 489   // Support for jint Atomic::xchg(jint exchange_value, volatile jint *dest)
 490   //
 491   // Arguments :
 492   //
 493   //      exchange_value: R0
 494   //      dest:           R1
 495   //
 496   // Results:
 497   //
 498   //     R0: the value previously stored in dest
 499   //
 500   // Overwrites:
 501   //
 502   //     R1, R2, R3
 503   //
 504   address generate_atomic_xchg() {
 505     address start;
 506 
 507     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 508     start = __ pc();
 509     Register newval    = R0;
 510     Register dest      = R1;
 511     Register prev      = R2;
 512 
 513     Label retry;
 514 
 515     if (VM_Version::supports_ldrex()) {
 516       Register ok=R3;
 517       __ membar(MEMBAR_ATOMIC_OP_PRE, prev);
 518       __ bind(retry);
 519       __ ldrex(prev, Address(dest));
 520       __ strex(ok, newval, Address(dest));
 521       __ cmp(ok, 0);
 522       __ b(retry, ne);
 523       __ mov (R0, prev);
 524       __ membar(MEMBAR_ATOMIC_OP_POST, prev);
 525     } else {
 526       __ bind(retry);
 527       __ ldr (prev, Address(dest));
 528       __ atomic_cas_bool(prev, newval, dest, 0, noreg/*ignored*/);
 529       __ b(retry, ne);
 530       __ mov (R0, prev);
 531     }
 532     __ bx(LR);
 533 
 534     return start;
 535   }
 536 
 537   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
 538   //
 539   // Arguments :
 540   //
 541   //      compare_value:  R0
 542   //      exchange_value: R1
 543   //      dest:           R2
 544   //
 545   // Results:
 546   //
 547   //     R0: the value previously stored in dest
 548   //
 549   // Overwrites:
 550   //
 551   //     R0, R1, R2, R3, Rtemp
 552   //
 553   address generate_atomic_cmpxchg() {
 554     address start;
 555 
 556     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 557     start = __ pc();
 558     Register cmp       = R0;
 559     Register newval    = R1;
 560     Register dest      = R2;
 561     Register temp1     = R3;
 562     Register temp2     = Rtemp; // Rtemp free (native ABI)
 563 
 564     __ membar(MEMBAR_ATOMIC_OP_PRE, temp1);
 565 
 566     // atomic_cas returns previous value in R0
 567     __ atomic_cas(temp1, temp2, cmp, newval, dest, 0);
 568 
 569     __ membar(MEMBAR_ATOMIC_OP_POST, temp1);
 570 
 571     __ bx(LR);
 572 
 573     return start;
 574   }
 575 
 576   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
 577   // whose arguments are reordered beforehand by a wrapper into (jlong compare_value, jlong exchange_value, volatile jlong *dest)
 578   //
 579   // Arguments :
 580   //
 581   //      compare_value:  R1 (High), R0 (Low)
 582   //      exchange_value: R3 (High), R2 (Low)
 583   //      dest:           SP+0
 584   //
 585   // Results:
 586   //
 587   //     R0:R1: the value previously stored in dest
 588   //
 589   // Overwrites:
 590   //
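  // (A descriptive note on the code below: 'dest' is the first stack
  // argument; after the three scratch registers are pushed it sits at
  // SP + 12, which is where it is loaded into Rtemp.)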
 591   address generate_atomic_cmpxchg_long() {
 592     address start;
 593 
 594     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 595     start = __ pc();
 596     Register cmp_lo      = R0;
 597     Register cmp_hi      = R1;
 598     Register newval_lo   = R2;
 599     Register newval_hi   = R3;
 600     Register addr        = Rtemp;  /* After load from stack */
 601     Register temp_lo     = R4;
 602     Register temp_hi     = R5;
 603     Register temp_result = R8;
 604     assert_different_registers(cmp_lo, newval_lo, temp_lo, addr, temp_result, R7);
 605     assert_different_registers(cmp_hi, newval_hi, temp_hi, addr, temp_result, R7);
 606 
 607     __ membar(MEMBAR_ATOMIC_OP_PRE, Rtemp); // Rtemp free (native ABI)
 608 
 609     // The stack is unaligned here, so maintain double-word alignment by pushing
 610     // an odd number of regs.
 611     __ push(RegisterSet(temp_result) | RegisterSet(temp_lo, temp_hi));
 612     __ ldr(addr, Address(SP, 12));
 613 
 614     // atomic_cas64 returns previous value in temp_lo, temp_hi
 615     __ atomic_cas64(temp_lo, temp_hi, temp_result, cmp_lo, cmp_hi,
 616                     newval_lo, newval_hi, addr, 0);
 617     __ mov(R0, temp_lo);
 618     __ mov(R1, temp_hi);
 619 
 620     __ pop(RegisterSet(temp_result) | RegisterSet(temp_lo, temp_hi));
 621 
 622     __ membar(MEMBAR_ATOMIC_OP_POST, Rtemp); // Rtemp free (native ABI)
 623     __ bx(LR);
 624 
 625     return start;
 626   }
 627 
 628   address generate_atomic_load_long() {
 629     address start;
 630 
 631     StubCodeMark mark(this, "StubRoutines", "atomic_load_long");
 632     start = __ pc();
 633     Register result_lo = R0;
 634     Register result_hi = R1;
 635     Register src       = R0;
 636 
 637     if (!os::is_MP()) {
 638       __ ldmia(src, RegisterSet(result_lo, result_hi));
 639       __ bx(LR);
 640     } else if (VM_Version::supports_ldrexd()) {
 641       __ ldrexd(result_lo, Address(src));
 642       __ clrex(); // FIXME: safe to remove?
 643       __ bx(LR);
 644     } else {
 645       __ stop("Atomic load(jlong) unsupported on this platform");
 646       __ bx(LR);
 647     }
 648 
 649     return start;
 650   }
 651 
 652   address generate_atomic_store_long() {
 653     address start;
 654 
 655     StubCodeMark mark(this, "StubRoutines", "atomic_store_long");
 656     start = __ pc();
 657     Register newval_lo = R0;
 658     Register newval_hi = R1;
 659     Register dest      = R2;
 660     Register scratch_lo    = R2;
 661     Register scratch_hi    = R3;  /* After load from stack */
 662     Register result    = R3;
 663 
 664     if (!os::is_MP()) {
 665       __ stmia(dest, RegisterSet(newval_lo, newval_hi));
 666       __ bx(LR);
 667     } else if (VM_Version::supports_ldrexd()) {
 668       __ mov(Rtemp, dest);  // get dest to Rtemp
 669       Label retry;
 670       __ bind(retry);
 671       __ ldrexd(scratch_lo, Address(Rtemp));
 672       __ strexd(result, R0, Address(Rtemp));
 673       __ rsbs(result, result, 1);
 674       __ b(retry, eq);
 675       __ bx(LR);
 676     } else {
 677       __ stop("Atomic store(jlong) unsupported on this platform");
 678       __ bx(LR);
 679     }
 680 
 681     return start;
 682   }
 683 
 684 
 685 
 686 #ifdef COMPILER2
 687   // Support for uint StubRoutine::Arm::partial_subtype_check( Klass sub, Klass super );
 688   // Arguments :
 689   //
 690   //      ret  : R0, returned
 691   //      icc/xcc: set as R0 (depending on wordSize)
 692   //      sub  : R1, argument, not changed
 693   //      super: R2, argument, not changed
 694   //      raddr: LR, blown by call
 695   address generate_partial_subtype_check() {
 696     __ align(CodeEntryAlignment);
 697     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
 698     address start = __ pc();
 699 
 700     // based on SPARC check_klass_subtype_[fast|slow]_path (without CompressedOops)
 701 
 702     // R0 used as tmp_reg (in addition to return reg)
 703     Register sub_klass = R1;
 704     Register super_klass = R2;
 705     Register tmp_reg2 = R3;
 706     Register tmp_reg3 = R4;
 707 #define saved_set tmp_reg2, tmp_reg3
 708 
 709     Label L_loop, L_fail;
 710 
 711     int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 712 
 713     // fast check should be redundant
 714 
 715     // slow check
 716     {
 717       __ raw_push(saved_set);
 718 
 719       // a couple of useful fields in sub_klass:
 720       int ss_offset = in_bytes(Klass::secondary_supers_offset());
 721 
 722       // Do a linear scan of the secondary super-klass chain.
 723       // This code is rarely used, so simplicity is a virtue here.
 724 
 725       inc_counter_np(SharedRuntime::_partial_subtype_ctr, tmp_reg2, tmp_reg3);
 726 
 727       Register scan_temp = tmp_reg2;
 728       Register count_temp = tmp_reg3;
 729 
 730       // We will consult the secondary-super array.
 731       __ ldr(scan_temp, Address(sub_klass, ss_offset));
 732 
 733       Register search_key = super_klass;
 734 
 735       // Load the array length.
 736       __ ldr_s32(count_temp, Address(scan_temp, Array<Klass*>::length_offset_in_bytes()));
 737       __ add(scan_temp, scan_temp, Array<Klass*>::base_offset_in_bytes());
 738 
 739       __ add(count_temp, count_temp, 1);
 740 
 741       // Top of search loop
 742       __ bind(L_loop);
 743       // Notes:
 744       //  scan_temp starts at the array elements
 745       //  count_temp is 1+size
 746       __ subs(count_temp, count_temp, 1);
 747       __ b(L_fail, eq); // not found in the array
 748 
 749       // Load next super to check
 750       // In the array of super classes elements are pointer sized.
 751       int element_size = wordSize;
 752       __ ldr(R0, Address(scan_temp, element_size, post_indexed));
 753 
 754       // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
 755       __ subs(R0, R0, search_key); // set R0 to 0 on success (and flags to eq)
 756 
 757       // A miss means we are NOT a subtype and need to keep looping
 758       __ b(L_loop, ne);
 759 
 760       // Falling out the bottom means we found a hit; we ARE a subtype
 761 
 762       // Success.  Cache the super we found and proceed in triumph.
 763       __ str(super_klass, Address(sub_klass, sc_offset));
 764 
 765       // Return success
 766       // R0 is already 0 and flags are already set to eq
 767       __ raw_pop(saved_set);
 768       __ ret();
 769 
 770       // Return failure
 771       __ bind(L_fail);
 772       __ movs(R0, 1); // sets the flags
 773       __ raw_pop(saved_set);
 774       __ ret();
 775     }
 776     return start;
 777   }
 778 #undef saved_set
 779 #endif // COMPILER2
 780 
 781 
 782   //----------------------------------------------------------------------------------------------------
 783   // Non-destructive plausibility checks for oops
 784 
 785   address generate_verify_oop() {
 786     StubCodeMark mark(this, "StubRoutines", "verify_oop");
 787     address start = __ pc();
 788 
 789     // Incoming arguments:
 790     //
 791     // R0: error message (char* )
 792     // R1: address of register save area
 793     // R2: oop to verify
 794     //
 795     // All registers are saved before calling this stub. However, condition flags should be saved here.
 796 
 797     const Register oop   = R2;
 798     const Register klass = R3;
 799     const Register tmp1  = R6;
 800     const Register tmp2  = R8;
 801 
 802     const Register flags     = Rtmp_save0; // R4/R19
 803     const Register ret_addr  = Rtmp_save1; // R5/R20
 804     assert_different_registers(oop, klass, tmp1, tmp2, flags, ret_addr, R7);
 805 
 806     Label exit, error;
 807     InlinedAddress verify_oop_count((address) StubRoutines::verify_oop_count_addr());
 808 
 809     __ mrs(Assembler::CPSR, flags);
 810 
 811     __ ldr_literal(tmp1, verify_oop_count);
 812     __ ldr_s32(tmp2, Address(tmp1));
 813     __ add(tmp2, tmp2, 1);
 814     __ str_32(tmp2, Address(tmp1));
 815 
 816     // make sure object is 'reasonable'
 817     __ cbz(oop, exit);                           // if obj is NULL it is ok
 818 
 819     // Check if the oop is in the right area of memory
 820     // Note: oop_mask and oop_bits must be updated if the code is saved/reused
 821     const address oop_mask = (address) Universe::verify_oop_mask();
 822     const address oop_bits = (address) Universe::verify_oop_bits();
 823     __ mov_address(tmp1, oop_mask, symbolic_Relocation::oop_mask_reference);
 824     __ andr(tmp2, oop, tmp1);
 825     __ mov_address(tmp1, oop_bits, symbolic_Relocation::oop_bits_reference);
 826     __ cmp(tmp2, tmp1);
 827     __ b(error, ne);
 828 
 829     // make sure klass is 'reasonable'
 830     __ load_klass(klass, oop);                   // get klass
 831     __ cbz(klass, error);                        // if klass is NULL it is broken
 832 
 833     // return if everything seems ok
 834     __ bind(exit);
 835 
 836     __ msr(Assembler::CPSR_f, flags);
 837 
 838     __ ret();
 839 
 840     // handle errors
 841     __ bind(error);
 842 
 843     __ mov(ret_addr, LR);                      // save return address
 844 
 845     // R0: error message
 846     // R1: register save area
 847     __ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug));
 848 
 849     __ mov(LR, ret_addr);
 850     __ b(exit);
 851 
 852     __ bind_literal(verify_oop_count);
 853 
 854     return start;
 855   }
 856 
 857   //----------------------------------------------------------------------------------------------------
 858   // Array copy stubs
 859 
 860   //
 861   //  Generate overlap test for array copy stubs
 862   //
 863   //  Input:
 864   //    R0    -  array1
 865   //    R1    -  array2
 866   //    R2    -  element count, 32-bit int
 867   //
 868   //  input registers are preserved
 869   //
 870   void array_overlap_test(address no_overlap_target, int log2_elem_size, Register tmp1, Register tmp2) {
 871     assert(no_overlap_target != NULL, "must be generated");
 872     array_overlap_test(no_overlap_target, NULL, log2_elem_size, tmp1, tmp2);
 873   }
 874   void array_overlap_test(Label& L_no_overlap, int log2_elem_size, Register tmp1, Register tmp2) {
 875     array_overlap_test(NULL, &L_no_overlap, log2_elem_size, tmp1, tmp2);
 876   }
 877   void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size, Register tmp1, Register tmp2) {
 878     const Register from       = R0;
 879     const Register to         = R1;
 880     const Register count      = R2;
 881     const Register to_from    = tmp1; // to - from
 882     const Register byte_count = (log2_elem_size == 0) ? count : tmp2; // count << log2_elem_size
 883     assert_different_registers(from, to, count, tmp1, tmp2);
 884 
 885     // The forward ('no overlap') copy is safe if 'to' is lower (unsigned) than 'from',
 886     // or if 'to' is at least (count*size) bytes above 'from'.
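    // Equivalently, the branch to the no-overlap (forward copy) code is taken when
    //   to < from  ||  (to - from) >= (count << log2_elem_size)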
 887 
 888     BLOCK_COMMENT("Array Overlap Test:");
 889     __ subs(to_from, to, from);
 890     if (log2_elem_size != 0) {
 891       __ mov(byte_count, AsmOperand(count, lsl, log2_elem_size));
 892     }
 893     if (NOLp == NULL)
 894       __ b(no_overlap_target,lo);
 895     else
 896       __ b((*NOLp), lo);
 897     __ cmp(to_from, byte_count);
 898     if (NOLp == NULL)
 899       __ b(no_overlap_target, ge);
 900     else
 901       __ b((*NOLp), ge);
 902   }
 903 
 904 
 905   // probably we should choose between "prefetch-store before or after the store", not "before or after the load".
 906   void prefetch(Register from, Register to, int offset, int to_delta = 0) {
 907     __ prefetch_read(Address(from, offset));
 908   }
 909 
 910   // Generate the inner loop for forward aligned array copy
 911   //
 912   // Arguments
 913   //      from:      src address, 64-bit aligned
 914   //      to:        dst address, wordSize aligned
 915   //      count:     number of elements (32-bit int)
 916   //      bytes_per_count: number of bytes for each unit of 'count'
 917   //
 918   // Return the minimum initial value for count
 919   //
 920   // Notes:
 921   // - 'from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA)
 922   // - 'to' aligned on wordSize
 923   // - 'count' must be greater than or equal to the returned value
 924   //
 925   // Increases 'from' and 'to' by count*bytes_per_count.
 926   //
 927   // Scratches 'count', R3.
 928   // R4-R10 are preserved (saved/restored).
 929   //
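  // A summary of the generated code below: the main loop moves
  // bytes_per_loop == 32 bytes per iteration with LDMIA/STMIA over R3-R10
  // (optionally split in two, per the selected configuration), and the
  // remaining 0..31 bytes are then copied by conditionally executed tail
  // transfers keyed off individual bits of 'count'.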
 930   int generate_forward_aligned_copy_loop(Register from, Register to, Register count, int bytes_per_count) {
 931     assert (from == R0 && to == R1 && count == R2, "adjust the implementation below");
 932 
 933     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iteration
 934     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].forward_aligned;
 935     int pld_offset = config->pld_distance;
 936     const int count_per_loop = bytes_per_loop / bytes_per_count;
 937 
 938     bool split_read= config->split_ldm;
 939     bool split_write= config->split_stm;
 940 
 941     // XXX optim: use VLDM/VSTM when available (Neon) with PLD
 942     //  NEONCopyPLD
 943     //      PLD [r1, #0xC0]
 944     //      VLDM r1!,{d0-d7}
 945     //      VSTM r0!,{d0-d7}
 946     //      SUBS r2,r2,#0x40
 947     //      BGE NEONCopyPLD
 948 
 949     __ push(RegisterSet(R4,R10));
 950 
 951     const bool prefetch_before = pld_offset < 0;
 952     const bool prefetch_after = pld_offset > 0;
 953 
 954     Label L_skip_pld;
 955 
 956     // predecrease count so that the loop exits when fewer than count_per_loop elements remain
 957     __ sub_32(count, count, count_per_loop);
 958 
 959     if (pld_offset != 0) {
 960       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
 961 
 962       prefetch(from, to, 0);
 963 
 964       if (prefetch_before) {
 965         // If prefetch is done ahead, final PLDs that overflow the
 966         // copied area can be easily avoided. 'count' is predecreased
 967         // by the prefetch distance to optimize the inner loop and the
 968         // outer loop skips the PLD.
 969         __ subs_32(count, count, (bytes_per_loop+pld_offset)/bytes_per_count);
 970 
 971         // skip prefetch for small copies
 972         __ b(L_skip_pld, lt);
 973       }
 974 
 975       int offset = ArmCopyCacheLineSize;
 976       while (offset <= pld_offset) {
 977         prefetch(from, to, offset);
 978         offset += ArmCopyCacheLineSize;
 979       };
 980     }
 981 
 982     {
 983       // 32-bit ARM note: we have tried implementing loop unrolling to skip one
 984       // PLD with a 64-byte cache line, but the gain was not significant.
 985 
 986       Label L_copy_loop;
 987       __ align(OptoLoopAlignment);
 988       __ BIND(L_copy_loop);
 989 
 990       if (prefetch_before) {
 991         prefetch(from, to, bytes_per_loop + pld_offset);
 992         __ BIND(L_skip_pld);
 993       }
 994 
 995       if (split_read) {
 996         // Split the register set in two sets so that there is less
 997         // latency between LDM and STM (R3-R6 available while R7-R10
 998         // still loading) and less register locking issue when iterating
 999         // on the first LDM.
1000         __ ldmia(from, RegisterSet(R3, R6), writeback);
1001         __ ldmia(from, RegisterSet(R7, R10), writeback);
1002       } else {
1003         __ ldmia(from, RegisterSet(R3, R10), writeback);
1004       }
1005 
1006       __ subs_32(count, count, count_per_loop);
1007 
1008       if (prefetch_after) {
1009         prefetch(from, to, pld_offset, bytes_per_loop);
1010       }
1011 
1012       if (split_write) {
1013         __ stmia(to, RegisterSet(R3, R6), writeback);
1014         __ stmia(to, RegisterSet(R7, R10), writeback);
1015       } else {
1016         __ stmia(to, RegisterSet(R3, R10), writeback);
1017       }
1018 
1019       __ b(L_copy_loop, ge);
1020 
1021       if (prefetch_before) {
1022         // the inner loop may end earlier, allowing us to skip the PLD for the last iterations
1023         __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1024         __ b(L_skip_pld, ge);
1025       }
1026     }
1027     BLOCK_COMMENT("Remaining bytes:");
1028     // still 0..bytes_per_loop-1 aligned bytes to copy, count already decreased by (at least) bytes_per_loop bytes
1029 
1030     // __ add(count, count, ...); // addition useless for the bit tests
1031     assert (pld_offset % bytes_per_loop == 0, "decreasing count by pld_offset before loop must not change tested bits");
1032 
1033     __ tst(count, 16 / bytes_per_count);
1034     __ ldmia(from, RegisterSet(R3, R6), writeback, ne); // copy 16 bytes
1035     __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1036 
1037     __ tst(count, 8 / bytes_per_count);
1038     __ ldmia(from, RegisterSet(R3, R4), writeback, ne); // copy 8 bytes
1039     __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1040 
1041     if (bytes_per_count <= 4) {
1042       __ tst(count, 4 / bytes_per_count);
1043       __ ldr(R3, Address(from, 4, post_indexed), ne); // copy 4 bytes
1044       __ str(R3, Address(to, 4, post_indexed), ne);
1045     }
1046 
1047     if (bytes_per_count <= 2) {
1048       __ tst(count, 2 / bytes_per_count);
1049       __ ldrh(R3, Address(from, 2, post_indexed), ne); // copy 2 bytes
1050       __ strh(R3, Address(to, 2, post_indexed), ne);
1051     }
1052 
1053     if (bytes_per_count == 1) {
1054       __ tst(count, 1);
1055       __ ldrb(R3, Address(from, 1, post_indexed), ne);
1056       __ strb(R3, Address(to, 1, post_indexed), ne);
1057     }
1058 
1059     __ pop(RegisterSet(R4,R10));
1060 
1061     return count_per_loop;
1062   }
1063 
1064 
1065   // Generate the inner loop for backward aligned array copy
1066   //
1067   // Arguments
1068   //      end_from:      src end address, 64-bit aligned
1069   //      end_to:        dst end address, wordSize aligned
1070   //      count:         number of elements (32-bit int)
1071   //      bytes_per_count: number of bytes for each unit of 'count'
1072   //
1073   // Return the minimum initial value for count
1074   //
1075   // Notes:
1076   // - 'end_from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA)
1077   // - 'end_to' aligned on wordSize
1078   // - 'count' must be greater than or equal to the returned value
1079   //
1080   // Decreases 'end_from' and 'end_to' by count*bytes_per_count.
1081   //
1082   // Scratches 'count', R3.
1083   // ARM R4-R10 are preserved (saved/restored).
1084   //
1085   int generate_backward_aligned_copy_loop(Register end_from, Register end_to, Register count, int bytes_per_count) {
1086     assert (end_from == R0 && end_to == R1 && count == R2, "adjust the implementation below");
1087 
1088     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iteration
1089     const int count_per_loop = bytes_per_loop / bytes_per_count;
1090 
1091     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].backward_aligned;
1092     int pld_offset = config->pld_distance;
1093 
1094     bool split_read= config->split_ldm;
1095     bool split_write= config->split_stm;
1096 
1097     // See the forward copy variant for additional comments.
1098 
1099     __ push(RegisterSet(R4,R10));
1100 
1101     __ sub_32(count, count, count_per_loop);
1102 
1103     const bool prefetch_before = pld_offset < 0;
1104     const bool prefetch_after = pld_offset > 0;
1105 
1106     Label L_skip_pld;
1107 
1108     if (pld_offset != 0) {
1109       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1110 
1111       prefetch(end_from, end_to, -wordSize);
1112 
1113       if (prefetch_before) {
1114         __ subs_32(count, count, (bytes_per_loop + pld_offset) / bytes_per_count);
1115         __ b(L_skip_pld, lt);
1116       }
1117 
1118       int offset = ArmCopyCacheLineSize;
1119       while (offset <= pld_offset) {
1120         prefetch(end_from, end_to, -(wordSize + offset));
1121         offset += ArmCopyCacheLineSize;
1122       };
1123     }
1124 
1125     {
1126       // 32-bit ARM note: we have tried implementing loop unrolling to skip one
1127       // PLD with a 64-byte cache line, but the gain was not significant.
1128 
1129       Label L_copy_loop;
1130       __ align(OptoLoopAlignment);
1131       __ BIND(L_copy_loop);
1132 
1133       if (prefetch_before) {
1134         prefetch(end_from, end_to, -(wordSize + bytes_per_loop + pld_offset));
1135         __ BIND(L_skip_pld);
1136       }
1137 
1138       if (split_read) {
1139         __ ldmdb(end_from, RegisterSet(R7, R10), writeback);
1140         __ ldmdb(end_from, RegisterSet(R3, R6), writeback);
1141       } else {
1142         __ ldmdb(end_from, RegisterSet(R3, R10), writeback);
1143       }
1144 
1145       __ subs_32(count, count, count_per_loop);
1146 
1147       if (prefetch_after) {
1148         prefetch(end_from, end_to, -(wordSize + pld_offset), -bytes_per_loop);
1149       }
1150 
1151       if (split_write) {
1152         __ stmdb(end_to, RegisterSet(R7, R10), writeback);
1153         __ stmdb(end_to, RegisterSet(R3, R6), writeback);
1154       } else {
1155         __ stmdb(end_to, RegisterSet(R3, R10), writeback);
1156       }
1157 
1158       __ b(L_copy_loop, ge);
1159 
1160       if (prefetch_before) {
1161         __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1162         __ b(L_skip_pld, ge);
1163       }
1164     }
1165     BLOCK_COMMENT("Remaining bytes:");
1166     // still 0..bytes_per_loop-1 aligned bytes to copy, count already decreased by (at least) bytes_per_loop bytes
1167 
1168     // __ add(count, count, ...); // addition useless for the bit tests
1169     assert (pld_offset % bytes_per_loop == 0, "decreasing count by pld_offset before loop must not change tested bits");
1170 
1171     __ tst(count, 16 / bytes_per_count);
1172     __ ldmdb(end_from, RegisterSet(R3, R6), writeback, ne); // copy 16 bytes
1173     __ stmdb(end_to, RegisterSet(R3, R6), writeback, ne);
1174 
1175     __ tst(count, 8 / bytes_per_count);
1176     __ ldmdb(end_from, RegisterSet(R3, R4), writeback, ne); // copy 8 bytes
1177     __ stmdb(end_to, RegisterSet(R3, R4), writeback, ne);
1178 
1179     if (bytes_per_count <= 4) {
1180       __ tst(count, 4 / bytes_per_count);
1181       __ ldr(R3, Address(end_from, -4, pre_indexed), ne); // copy 4 bytes
1182       __ str(R3, Address(end_to, -4, pre_indexed), ne);
1183     }
1184 
1185     if (bytes_per_count <= 2) {
1186       __ tst(count, 2 / bytes_per_count);
1187       __ ldrh(R3, Address(end_from, -2, pre_indexed), ne); // copy 2 bytes
1188       __ strh(R3, Address(end_to, -2, pre_indexed), ne);
1189     }
1190 
1191     if (bytes_per_count == 1) {
1192       __ tst(count, 1);
1193       __ ldrb(R3, Address(end_from, -1, pre_indexed), ne);
1194       __ strb(R3, Address(end_to, -1, pre_indexed), ne);
1195     }
1196 
1197     __ pop(RegisterSet(R4,R10));
1198 
1199     return count_per_loop;
1200   }
1201 
1202 
1203   // Generate the inner loop for shifted forward array copy (unaligned copy).
1204   // It can be used when bytes_per_count < wordSize, i.e. byte/short copy
1205   //
1206   // Arguments
1207   //      from:      start src address, 64-bit aligned
1208   //      to:        start dst address, (now) wordSize aligned
1209   //      count:     number of elements (32-bit int)
1210   //      bytes_per_count: number of bytes for each unit of 'count'
1211   //      lsr_shift: shift applied to the 'old' value to skip the bytes already written
1212   //      lsl_shift: shift applied to 'new' value to set the high bytes of the next write
1213   //
1214   // Return the minimum initial value for count
1215   //
1216   // Notes:
1217   // - 'from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA)
1218   // - 'to' aligned on wordSize
1219   // - 'count' must be greater than or equal to the returned value
1220   // - 'lsr_shift' + 'lsl_shift' = BitsPerWord
1221   // - 'bytes_per_count' is 1 or 2
1222   //
1223   // Increases 'to' by count*bytes_per_count.
1224   //
1225   // Scratches 'from' and 'count', R3-R10, R12
1226   //
1227   // On entry:
1228   // - R12 is preloaded with the first 'BitsPerWord' bits read just before 'from'
1229   // - (R12 >> lsr_shift) is the part not yet written (just before 'to')
1230   // --> (*to) = (R12 >> lsr_shift) | ((*from) << lsl_shift); ...
1231   //
1232   // This implementation may read more bytes than required.
1233   // Actually, it always reads exactly all the data of the copied region, with the upper bound aligned up to wordSize,
1234   // so the excess reads do not cross a word boundary and are thus harmless.
1235   //
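  // A concrete reading of the recurrence above (a sketch of one case,
  // lsr_shift == lsl_shift == 16): each destination word combines the part
  // of the previously loaded source word that has not been written yet
  // (R12 >> 16) with the low half of the newly loaded word ((*from) << 16),
  // so the 2-byte skew between source and destination is absorbed entirely
  // with register shifts and the main loop issues only whole-word accesses.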
1236   int generate_forward_shifted_copy_loop(Register from, Register to, Register count, int bytes_per_count, int lsr_shift, int lsl_shift) {
1237     assert (from == R0 && to == R1 && count == R2, "adjust the implementation below");
1238 
1239     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iter
1240     const int count_per_loop = bytes_per_loop / bytes_per_count;
1241 
1242     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].forward_shifted;
1243     int pld_offset = config->pld_distance;
1244 
1245     bool split_read= config->split_ldm;
1246     bool split_write= config->split_stm;
1247 
1248     const bool prefetch_before = pld_offset < 0;
1249     const bool prefetch_after = pld_offset > 0;
1250     Label L_skip_pld, L_last_read, L_done;
1251     if (pld_offset != 0) {
1252 
1253       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1254 
1255       prefetch(from, to, 0);
1256 
1257       if (prefetch_before) {
1258         __ cmp_32(count, count_per_loop);
1259         __ b(L_last_read, lt);
1260         // skip prefetch for small copies
1261         // warning: count is predecreased by the prefetch distance to optimize the inner loop
1262         __ subs_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
1263         __ b(L_skip_pld, lt);
1264       }
1265 
1266       int offset = ArmCopyCacheLineSize;
1267       while (offset <= pld_offset) {
1268         prefetch(from, to, offset);
1269         offset += ArmCopyCacheLineSize;
1270       };
1271     }
1272 
1273     Label L_shifted_loop;
1274 
1275     __ align(OptoLoopAlignment);
1276     __ BIND(L_shifted_loop);
1277 
1278     if (prefetch_before) {
1279       // do it early if there might be register locking issues
1280       prefetch(from, to, bytes_per_loop + pld_offset);
1281       __ BIND(L_skip_pld);
1282     } else {
1283       __ cmp_32(count, count_per_loop);
1284       __ b(L_last_read, lt);
1285     }
1286 
1287     // read 32 bytes
1288     if (split_read) {
1289       // if the write is not split, use fewer registers in the first set to reduce locking
1290       RegisterSet set1 = split_write ? RegisterSet(R4, R7) : RegisterSet(R4, R5);
1291       RegisterSet set2 = (split_write ? RegisterSet(R8, R10) : RegisterSet(R6, R10)) | R12;
1292       __ ldmia(from, set1, writeback);
1293       __ mov(R3, AsmOperand(R12, lsr, lsr_shift)); // part of R12 not yet written
1294       __ ldmia(from, set2, writeback);
1295       __ subs(count, count, count_per_loop); // XXX: should it be before the 2nd LDM ? (latency vs locking)
1296     } else {
1297       __ mov(R3, AsmOperand(R12, lsr, lsr_shift)); // part of R12 not yet written
1298       __ ldmia(from, RegisterSet(R4, R10) | R12, writeback); // Note: small latency on R4
1299       __ subs(count, count, count_per_loop);
1300     }
1301 
1302     if (prefetch_after) {
1303       // do it after the 1st ldm/ldp anyway  (no locking issues with early STM/STP)
1304       prefetch(from, to, pld_offset, bytes_per_loop);
1305     }
1306 
1307     // prepare (shift) the values in R3..R10
1308     __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift)); // merged below low bytes of next val
1309     __ logical_shift_right(R4, R4, lsr_shift); // unused part of next val
1310     __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift)); // ...
1311     __ logical_shift_right(R5, R5, lsr_shift);
1312     __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift));
1313     __ logical_shift_right(R6, R6, lsr_shift);
1314     __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift));
1315     if (split_write) {
1316       // write the first half as soon as possible to reduce stm locking
1317       __ stmia(to, RegisterSet(R3, R6), writeback, prefetch_before ? gt : ge);
1318     }
1319     __ logical_shift_right(R7, R7, lsr_shift);
1320     __ orr(R7, R7, AsmOperand(R8, lsl, lsl_shift));
1321     __ logical_shift_right(R8, R8, lsr_shift);
1322     __ orr(R8, R8, AsmOperand(R9, lsl, lsl_shift));
1323     __ logical_shift_right(R9, R9, lsr_shift);
1324     __ orr(R9, R9, AsmOperand(R10, lsl, lsl_shift));
1325     __ logical_shift_right(R10, R10, lsr_shift);
1326     __ orr(R10, R10, AsmOperand(R12, lsl, lsl_shift));
1327 
1328     if (split_write) {
1329       __ stmia(to, RegisterSet(R7, R10), writeback, prefetch_before ? gt : ge);
1330     } else {
1331       __ stmia(to, RegisterSet(R3, R10), writeback, prefetch_before ? gt : ge);
1332     }
1333     __ b(L_shifted_loop, gt); // no need to loop if 0 (when count need not be precise modulo bytes_per_loop)
1334 
1335     if (prefetch_before) {
1336       // the first loop may end earlier, allowing us to skip the PLD at the end
1337       __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1338       __ stmia(to, RegisterSet(R3, R10), writeback); // stmia was skipped
1339       __ b(L_skip_pld, ge);
1340       __ adds_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
1341     }
1342 
1343     __ BIND(L_last_read);
1344     __ b(L_done, eq);
1345 
1346     switch (bytes_per_count) {
1347     case 2:
1348       __ mov(R3, AsmOperand(R12, lsr, lsr_shift));
1349       __ tst(count, 8);
1350       __ ldmia(from, RegisterSet(R4, R7), writeback, ne);
1351       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1352       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1353       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1354       __ mov(R5, AsmOperand(R5, lsr, lsr_shift), ne);
1355       __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift), ne);
1356       __ mov(R6, AsmOperand(R6, lsr, lsr_shift), ne);
1357       __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift), ne);
1358       __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1359       __ mov(R3, AsmOperand(R7, lsr, lsr_shift), ne);
1360 
1361       __ tst(count, 4);
1362       __ ldmia(from, RegisterSet(R4, R5), writeback, ne);
1363       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1364       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1365       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1366       __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1367       __ mov(R3, AsmOperand(R5, lsr, lsr_shift), ne);
1368 
1369       __ tst(count, 2);
1370       __ ldr(R4, Address(from, 4, post_indexed), ne);
1371       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne);
1372       __ str(R3, Address(to, 4, post_indexed), ne);
1373       __ mov(R3, AsmOperand(R4, lsr, lsr_shift), ne);
1374 
1375       __ tst(count, 1);
1376       __ strh(R3, Address(to, 2, post_indexed), ne); // one last short
1377       break;
1378 
1379     case 1:
1380       __ mov(R3, AsmOperand(R12, lsr, lsr_shift));
1381       __ tst(count, 16);
1382       __ ldmia(from, RegisterSet(R4, R7), writeback, ne);
1383       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1384       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1385       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1386       __ mov(R5, AsmOperand(R5, lsr, lsr_shift), ne);
1387       __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift), ne);
1388       __ mov(R6, AsmOperand(R6, lsr, lsr_shift), ne);
1389       __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift), ne);
1390       __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1391       __ mov(R3, AsmOperand(R7, lsr, lsr_shift), ne);
1392 
1393       __ tst(count, 8);
1394       __ ldmia(from, RegisterSet(R4, R5), writeback, ne);
1395       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1396       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1397       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1398       __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1399       __ mov(R3, AsmOperand(R5, lsr, lsr_shift), ne);
1400 
1401       __ tst(count, 4);
1402       __ ldr(R4, Address(from, 4, post_indexed), ne);
1403       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne);
1404       __ str(R3, Address(to, 4, post_indexed), ne);
1405       __ mov(R3, AsmOperand(R4, lsr, lsr_shift), ne);
1406 
1407       __ andr(count, count, 3);
1408       __ cmp(count, 2);
1409 
1410       // Note: R3 might already contain enough bytes ready to write (3 needed at most),
1411       // thus the load for lsl_shift==24 is not needed (it would in fact read
1412       // beyond the source buffer end boundary)
1413       if (lsl_shift == 8) {
1414         __ ldr(R4, Address(from, 4, post_indexed), ge);
1415         __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ge);
1416       } else if (lsl_shift == 16) {
1417         __ ldr(R4, Address(from, 4, post_indexed), gt);
1418         __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), gt);
1419       }
1420 
1421       __ strh(R3, Address(to, 2, post_indexed), ge); // two last bytes
1422       __ mov(R3, AsmOperand(R3, lsr, 16), gt);
1423 
1424       __ tst(count, 1);
1425       __ strb(R3, Address(to, 1, post_indexed), ne); // one last byte
1426       break;
1427     }
1428 
1429     __ BIND(L_done);
1430     return 0; // no minimum
1431   }
1432 
1433   // Generate the inner loop for shifted backward array copy (unaligned copy).
1434   // It can be used when bytes_per_count < wordSize, i.e. byte/short copy
1435   //
1436   // Arguments
1437   //      end_from:  end src address, 64-bit aligned
1438   //      end_to:    end dst address, (now) wordSize aligned
1439   //      count:     number of elements (32-bit int)
1440   //      bytes_per_count: number of bytes for each unit of 'count'
1441   //      lsl_shift: shift applied to the 'old' value to skip the bytes already written
1442   //      lsr_shift: shift applied to 'new' value to set the low bytes of the next write
1443   //
1444   // Return the minimum initial value for count
1445   //
1446   // Notes:
1447   // - 'end_from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA)
1448   // - 'end_to' aligned on wordSize
1449   // - 'count' must be greater than or equal to the returned value
1450   // - 'lsr_shift' + 'lsl_shift' = 'BitsPerWord'
1451   // - 'bytes_per_count' is 1 or 2 on 32-bit ARM
1452   //
1453   // Decreases 'end_to' by count*bytes_per_count.
1454   //
1455   // Scratches 'end_from', 'count', R3-R10, R12
1456   //
1457   // On entry:
1458   // - R3 is preloaded with the first 'BitsPerWord' bits read just after 'from'
1459   // - (R3 << lsl_shift) is the part not yet written
1460   // --> (*--to) = (R3 << lsl_shift) | ((*--from) >> lsr_shift); ...
1461   //
1462   // This implementation may read more bytes than required.
1463   // Actually, it always reads exactly all data from the copied region with its beginning aligned down by wordSize,
1464   // so the excess reads do not cross a word boundary and are thus harmless.
1465   //
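  //
  // Illustrative C-like sketch of one step of the loop below (comment only, not part
  // of the generated code; 'prev' plays the role of R3, see the entry condition above):
  //   uint32_t prev = R3;
  //   while (enough elements remain) {
  //     uint32_t cur = *--end_from_words;
  //     *--end_to_words = (prev << lsl_shift) | (cur >> lsr_shift);
  //     prev = cur;
  //   }
  // The emitted loop unrolls this 8 words per iteration (see bytes_per_loop below),
  // optionally interleaving prefetches and split LDM/STM, and then handles the tail.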
1466   int generate_backward_shifted_copy_loop(Register end_from, Register end_to, Register count, int bytes_per_count, int lsr_shift, int lsl_shift) {
1467     assert (end_from == R0 && end_to == R1 && count == R2, "adjust the implementation below");
1468 
1469     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iter
1470     const int count_per_loop = bytes_per_loop / bytes_per_count;
1471 
1472     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].backward_shifted;
1473     int pld_offset = config->pld_distance;
1474 
1475     bool split_read= config->split_ldm;
1476     bool split_write= config->split_stm;
1477 
1478 
1479     const bool prefetch_before = pld_offset < 0;
1480     const bool prefetch_after = pld_offset > 0;
1481 
1482     Label L_skip_pld, L_done, L_last_read;
1483     if (pld_offset != 0) {
1484 
1485       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1486 
1487       prefetch(end_from, end_to, -wordSize);
1488 
1489       if (prefetch_before) {
1490         __ cmp_32(count, count_per_loop);
1491         __ b(L_last_read, lt);
1492 
1493         // skip prefetch for small copies
1494         // warning: count is predecreased by the prefetch distance to optimize the inner loop
1495         __ subs_32(count, count, ((bytes_per_loop + pld_offset)/bytes_per_count) + count_per_loop);
1496         __ b(L_skip_pld, lt);
1497       }
1498 
1499       int offset = ArmCopyCacheLineSize;
1500       while (offset <= pld_offset) {
1501         prefetch(end_from, end_to, -(wordSize + offset));
1502         offset += ArmCopyCacheLineSize;
1503       }
1504     }
1505 
1506     Label L_shifted_loop;
1507     __ align(OptoLoopAlignment);
1508     __ BIND(L_shifted_loop);
1509 
1510     if (prefetch_before) {
1511       // do the 1st ldm/ldp first anyway (no locking issues with early STM/STP)
1512       prefetch(end_from, end_to, -(wordSize + bytes_per_loop + pld_offset));
1513       __ BIND(L_skip_pld);
1514     } else {
1515       __ cmp_32(count, count_per_loop);
1516       __ b(L_last_read, lt);
1517     }
1518 
1519     if (split_read) {
1520       __ ldmdb(end_from, RegisterSet(R7, R10), writeback);
1521       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
1522       __ ldmdb(end_from, RegisterSet(R3, R6), writeback);
1523     } else {
1524       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
1525       __ ldmdb(end_from, RegisterSet(R3, R10), writeback);
1526     }
1527 
1528     __ subs_32(count, count, count_per_loop);
1529 
1530     if (prefetch_after) { // do prefetch during ldm/ldp latency
1531       prefetch(end_from, end_to, -(wordSize + pld_offset), -bytes_per_loop);
1532     }
1533 
1534     // prepare the values in R4..R10,R12
1535     __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift)); // merged above high  bytes of prev val
1536     __ logical_shift_left(R10, R10, lsl_shift); // unused part of prev val
1537     __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift)); // ...
1538     __ logical_shift_left(R9, R9, lsl_shift);
1539     __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift));
1540     __ logical_shift_left(R8, R8, lsl_shift);
1541     __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift));
1542     __ logical_shift_left(R7, R7, lsl_shift);
1543     __ orr(R7, R7, AsmOperand(R6, lsr, lsr_shift));
1544     __ logical_shift_left(R6, R6, lsl_shift);
1545     __ orr(R6, R6, AsmOperand(R5, lsr, lsr_shift));
1546     if (split_write) {
1547       // store early to reduce locking issues
1548       __ stmdb(end_to, RegisterSet(R6, R10) | R12, writeback, prefetch_before ? gt : ge);
1549     }
1550     __ logical_shift_left(R5, R5, lsl_shift);
1551     __ orr(R5, R5, AsmOperand(R4, lsr, lsr_shift));
1552     __ logical_shift_left(R4, R4, lsl_shift);
1553     __ orr(R4, R4, AsmOperand(R3, lsr, lsr_shift));
1554 
1555     if (split_write) {
1556       __ stmdb(end_to, RegisterSet(R4, R5), writeback, prefetch_before ? gt : ge);
1557     } else {
1558       __ stmdb(end_to, RegisterSet(R4, R10) | R12, writeback, prefetch_before ? gt : ge);
1559     }
1560 
1561     __ b(L_shifted_loop, gt); // no need to loop if 0 (when count need not be precise modulo bytes_per_loop)
1562 
1563     if (prefetch_before) {
1564       // the first loop may end earlier, allowing us to skip the pld at the end
1565       __ cmn_32(count, ((bytes_per_loop + pld_offset)/bytes_per_count));
1566       __ stmdb(end_to, RegisterSet(R4, R10) | R12, writeback); // stmdb was skipped
1567       __ b(L_skip_pld, ge);
1568       __ adds_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
1569     }
1570 
1571     __ BIND(L_last_read);
1572     __ b(L_done, eq);
1573 
1574       switch(bytes_per_count) {
1575       case 2:
1576       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
1577       __ tst(count, 8);
1578       __ ldmdb(end_from, RegisterSet(R7,R10), writeback, ne);
1579       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1580       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
1581       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
1582       __ mov(R9, AsmOperand(R9, lsl, lsl_shift),ne);
1583       __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift),ne);
1584       __ mov(R8, AsmOperand(R8, lsl, lsl_shift),ne);
1585       __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift),ne);
1586       __ stmdb(end_to, RegisterSet(R8,R10)|R12, writeback, ne);
1587       __ mov(R12, AsmOperand(R7, lsl, lsl_shift), ne);
1588 
1589       __ tst(count, 4);
1590       __ ldmdb(end_from, RegisterSet(R9, R10), writeback, ne);
1591       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1592       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
1593       __ orr(R10, R10, AsmOperand(R9, lsr,lsr_shift),ne); // ...
1594       __ stmdb(end_to, RegisterSet(R10)|R12, writeback, ne);
1595       __ mov(R12, AsmOperand(R9, lsl, lsl_shift), ne);
1596 
1597       __ tst(count, 2);
1598       __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
1599       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1600       __ str(R12, Address(end_to, -4, pre_indexed), ne);
1601       __ mov(R12, AsmOperand(R10, lsl, lsl_shift), ne);
1602 
1603       __ tst(count, 1);
1604       __ mov(R12, AsmOperand(R12, lsr, lsr_shift),ne);
1605       __ strh(R12, Address(end_to, -2, pre_indexed), ne); // one last short
1606       break;
1607 
1608       case 1:
1609       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
1610       __ tst(count, 16);
1611       __ ldmdb(end_from, RegisterSet(R7,R10), writeback, ne);
1612       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1613       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
1614       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
1615       __ mov(R9, AsmOperand(R9, lsl, lsl_shift),ne);
1616       __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift),ne);
1617       __ mov(R8, AsmOperand(R8, lsl, lsl_shift),ne);
1618       __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift),ne);
1619       __ stmdb(end_to, RegisterSet(R8,R10)|R12, writeback, ne);
1620       __ mov(R12, AsmOperand(R7, lsl, lsl_shift), ne);
1621 
1622       __ tst(count, 8);
1623       __ ldmdb(end_from, RegisterSet(R9,R10), writeback, ne);
1624       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1625       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
1626       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
1627       __ stmdb(end_to, RegisterSet(R10)|R12, writeback, ne);
1628       __ mov(R12, AsmOperand(R9, lsl, lsl_shift), ne);
1629 
1630       __ tst(count, 4);
1631       __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
1632       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1633       __ str(R12, Address(end_to, -4, pre_indexed), ne);
1634       __ mov(R12, AsmOperand(R10, lsl, lsl_shift), ne);
1635 
1636       __ tst(count, 2);
1637       if (lsr_shift != 24) {
1638         // avoid useless reading R10 when we already have 3 bytes ready in R12
1639         __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
1640         __ orr(R12, R12, AsmOperand(R10, lsr,lsr_shift), ne);
1641       }
1642 
1643       // Note: R12 contains enough bytes ready to write (3 needed at most)
1644       // write the 2 MSBs
1645       __ mov(R9, AsmOperand(R12, lsr, 16), ne);
1646       __ strh(R9, Address(end_to, -2, pre_indexed), ne);
1647       // promote remaining to MSB
1648       __ mov(R12, AsmOperand(R12, lsl, 16), ne);
1649 
1650       __ tst(count, 1);
1651       // write the MSB of R12
1652       __ mov(R12, AsmOperand(R12, lsr, 24), ne);
1653       __ strb(R12, Address(end_to, -1, pre_indexed), ne);
1654 
1655       break;
1656       }
1657 
1658     __ BIND(L_done);
1659     return 0; // no minimum
1660   }
1661 
1662   // Helper for merging the forward and backward copy implementations.
1663   Address get_addr_with_indexing(Register base, int delta, bool forward) {
1664     if (forward) {
1665       return Address(base, delta, post_indexed);
1666     } else {
1667       return Address(base, -delta, pre_indexed);
1668     }
1669   }
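  // For example, with delta == 4: the forward case yields a post-indexed address
  // (access at 'base', then base += 4), while the backward case yields a pre-indexed
  // address with a negative offset (base -= 4 first, then access at the new 'base').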
1670 
1671   void load_one(Register rd, Register from, int size_in_bytes, bool forward, AsmCondition cond = al, Register rd2 = noreg) {
1672     assert_different_registers(from, rd, rd2);
1673     if (size_in_bytes < 8) {
1674       Address addr = get_addr_with_indexing(from, size_in_bytes, forward);
1675       __ load_sized_value(rd, addr, size_in_bytes, false, cond);
1676     } else {
1677       assert (rd2 != noreg, "second value register must be specified");
1678       assert (rd->encoding() < rd2->encoding(), "wrong value register set");
1679 
1680       if (forward) {
1681         __ ldmia(from, RegisterSet(rd) | rd2, writeback, cond);
1682       } else {
1683         __ ldmdb(from, RegisterSet(rd) | rd2, writeback, cond);
1684       }
1685     }
1686   }
1687 
1688   void store_one(Register rd, Register to, int size_in_bytes, bool forward, AsmCondition cond = al, Register rd2 = noreg) {
1689     assert_different_registers(to, rd, rd2);
1690     if (size_in_bytes < 8) {
1691       Address addr = get_addr_with_indexing(to, size_in_bytes, forward);
1692       __ store_sized_value(rd, addr, size_in_bytes, cond);
1693     } else {
1694       assert (rd2 != noreg, "second value register must be specified");
1695       assert (rd->encoding() < rd2->encoding(), "wrong value register set");
1696 
1697       if (forward) {
1698         __ stmia(to, RegisterSet(rd) | rd2, writeback, cond);
1699       } else {
1700         __ stmdb(to, RegisterSet(rd) | rd2, writeback, cond);
1701       }
1702     }
1703   }
1704 
1705   // Copies data from 'from' to 'to' in specified direction to align 'from' by 64 bits.
1706   // (on 32-bit ARM 64-bit alignment is better for LDM).
1707   //
1708   // Arguments:
1709   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
1710   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1711   //     count:             32-bit int, maximum number of elements which can be copied
1712   //     bytes_per_count:   size of an element
1713   //     forward:           specifies copy direction
1714   //
1715   // Notes:
1716   //   'from' and 'to' must be aligned by 'bytes_per_count'
1717   //   'count' must not be less than the returned value
1718   //   shifts 'from' and 'to' by the number of copied bytes in corresponding direction
1719   //   decreases 'count' by the number of elements copied
1720   //
1721   // Returns the maximum number of elements which may be copied during alignment.
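  //
  // Roughly equivalent C-like sketch of the emitted alignment code (comment only,
  // not part of the generated code), for bytes_per_count < 4:
  //   while (from & 7) {                 // at most 7/bytes_per_count iterations
  //     count -= 1;
  //     copy one element from 'from' to 'to', advancing both in the copy direction;
  //   }
  // For bytes_per_count == 4 a single conditional element copy suffices, and for
  // bytes_per_count >= 8 no alignment code is emitted at all.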
1722   int align_src(Register from, Register to, Register count, Register tmp, int bytes_per_count, bool forward) {
1723     assert_different_registers(from, to, count, tmp);
1724     if (bytes_per_count < 8) {
1725       Label L_align_src;
1726       __ BIND(L_align_src);
1727       __ tst(from, 7);
1728       // ne => not aligned: copy one element and (if bytes_per_count < 4) loop
1729       __ sub(count, count, 1, ne);
1730       load_one(tmp, from, bytes_per_count, forward, ne);
1731       store_one(tmp, to, bytes_per_count, forward, ne);
1732       if (bytes_per_count < 4) {
1733         __ b(L_align_src, ne); // if bytes_per_count == 4, then 0 or 1 loop iterations are enough
1734       }
1735     }
1736     return 7/bytes_per_count;
1737   }
1738 
1739   // Copies 'count' of 'bytes_per_count'-sized elements in the specified direction.
1740   //
1741   // Arguments:
1742   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
1743   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1744   //     count:             32-bit int, number of elements to be copied
1745   //     entry:             copy loop entry point
1746   //     bytes_per_count:   size of an element
1747   //     forward:           specifies copy direction
1748   //
1749   // Notes:
1750   //     shifts 'from' and 'to'
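  //
  // The loop below is rotated: callers branch to 'entry', which decrements 'count',
  // conditionally loads one element and jumps back to the store at the loop head.
  // Comment-only sketch of the resulting control flow:
  //   goto entry;
  //   loop:  store element;
  //   entry: if (--count >= 0) { load element; goto loop; }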
1751   void copy_small_array(Register from, Register to, Register count, Register tmp, Register tmp2, int bytes_per_count, bool forward, Label & entry) {
1752     assert_different_registers(from, to, count, tmp);
1753 
1754     __ align(OptoLoopAlignment);
1755     Label L_small_loop;
1756     __ BIND(L_small_loop);
1757     store_one(tmp, to, bytes_per_count, forward, al, tmp2);
1758     __ BIND(entry); // entry point
1759     __ subs(count, count, 1);
1760     load_one(tmp, from, bytes_per_count, forward, ge, tmp2);
1761     __ b(L_small_loop, ge);
1762   }
1763 
1764   // Aligns 'to' by reading one word from 'from' and writing part of it to 'to'.
1765   //
1766   // Arguments:
1767   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1768   //     count:             32-bit int, number of elements allowed to be copied
1769   //     to_remainder:      remainder of dividing 'to' by wordSize
1770   //     bytes_per_count:   size of an element
1771   //     forward:           specifies copy direction
1772   //     Rval:              contains an already read but not yet written word;
1773   //                        its LSBs (if forward) or MSBs (if !forward) are to be written to align 'to'.
1774   //
1775   // Notes:
1776   //     'count' must not be less than the returned value
1777   //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
1778   //     shifts 'to' by the number of written bytes (so that it becomes the bound of memory to be written)
1779   //     decreases 'count' by the number of elements written
1780   //     Rval's MSBs or LSBs remain to be written further by generate_{forward,backward}_shifted_copy_loop
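  //
  //     Worked example (comment only): forward copy on 32-bit ARM, to_remainder == 1,
  //     bytes_per_count == 1  =>  bytes_to_write == 3, emitted as a 1-byte store of
  //     Rval's lowest byte followed by a 2-byte store of (Rval >> 8); 'to' advances
  //     by 3 bytes and 'count' is decreased by 3.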
1781   int align_dst(Register to, Register count, Register Rval, Register tmp,
1782                                         int to_remainder, int bytes_per_count, bool forward) {
1783     assert_different_registers(to, count, tmp, Rval);
1784 
1785     assert (0 < to_remainder && to_remainder < wordSize, "to_remainder is not valid");
1786     assert (to_remainder % bytes_per_count == 0, "to must be aligned by bytes_per_count");
1787 
1788     int bytes_to_write = forward ? (wordSize - to_remainder) : to_remainder;
1789 
1790     int offset = 0;
1791 
1792     for (int l = 0; l < LogBytesPerWord; ++l) {
1793       int s = (1 << l);
1794       if (bytes_to_write & s) {
1795         int new_offset = offset + s*BitsPerByte;
1796         if (forward) {
1797           if (offset == 0) {
1798             store_one(Rval, to, s, forward);
1799           } else {
1800             __ logical_shift_right(tmp, Rval, offset);
1801             store_one(tmp, to, s, forward);
1802           }
1803         } else {
1804           __ logical_shift_right(tmp, Rval, BitsPerWord - new_offset);
1805           store_one(tmp, to, s, forward);
1806         }
1807 
1808         offset = new_offset;
1809       }
1810     }
1811 
1812     assert (offset == bytes_to_write * BitsPerByte, "all bytes must be copied");
1813 
1814     __ sub_32(count, count, bytes_to_write/bytes_per_count);
1815 
1816     return bytes_to_write / bytes_per_count;
1817   }
1818 
1819   // Copies 'count' of elements using shifted copy loop
1820   //
1821   // Arguments:
1822   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
1823   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1824   //     count:             32-bit int, number of elements to be copied
1825   //     to_remainder:      remainder of dividing 'to' by wordSize
1826   //     bytes_per_count:   size of an element
1827   //     forward:           specifies copy direction
1828   //     Rval:              contains an already read but not yet written word
1829   //
1830   //
1831   // Notes:
1832   //     'count' must not be less than the returned value
1833   //     'from' must be aligned by wordSize
1834   //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
1835   //     shifts 'to' by the number of copied bytes
1836   //
1837   // Scratches R3-R10, R12
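  //
  // Example (comment only, forward case): to_remainder == 3 on 32-bit ARM gives
  // lsr_shift == 8 and lsl_shift == 24, i.e. align_dst writes one byte of Rval and
  // each word written by the shifted loop then combines the three remaining bytes of
  // the previous word (>> 8) with one byte of the freshly loaded word (<< 24).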
1838   int align_dst_and_generate_shifted_copy_loop(Register from, Register to, Register count, Register Rval,
1839                                                         int to_remainder, int bytes_per_count, bool forward) {
1840 
1841     assert (0 < to_remainder && to_remainder < wordSize, "to_remainder is invalid");
1842 
1843     const Register tmp  = forward ? R3 : R12;
1844     assert_different_registers(from, to, count, Rval, tmp);
1845 
1846     int required_to_align = align_dst(to, count, Rval, tmp, to_remainder, bytes_per_count, forward);
1847 
1848     int lsr_shift = (wordSize - to_remainder) * BitsPerByte;
1849     int lsl_shift = to_remainder * BitsPerByte;
1850 
1851     int min_copy;
1852     if (forward) {
1853       min_copy = generate_forward_shifted_copy_loop(from, to, count, bytes_per_count, lsr_shift, lsl_shift);
1854     } else {
1855       min_copy = generate_backward_shifted_copy_loop(from, to, count, bytes_per_count, lsr_shift, lsl_shift);
1856     }
1857 
1858     return min_copy + required_to_align;
1859   }
1860 
1861   // Copies 'count' of elements using shifted copy loop
1862   //
1863   // Arguments:
1864   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
1865   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1866   //     count:             32-bit int, number of elements to be copied
1867   //     bytes_per_count:   size of an element
1868   //     forward:           specifies copy direction
1869   //
1870   // Notes:
1871   //     'count' must not be less than the returned value
1872   //     'from' must be aligned by wordSize
1873   //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
1874   //     shifts 'to' by the number of copied bytes
1875   //
1876   // Scratches 'from', 'count', R3 and R12.
1877   // R4-R10 are saved (pushed) and restored around the copy.
1878   int align_dst_and_generate_shifted_copy_loop(Register from, Register to, Register count, int bytes_per_count, bool forward) {
1879 
1880     const Register Rval = forward ? R12 : R3; // as generate_{forward,backward}_shifted_copy_loop expect
1881 
1882     int min_copy = 0;
1883 
1884     // Note: if {seq} is a sequence of numbers, L{seq} means that if execution reaches this point,
1885     // then the remainder of 'to' divided by wordSize is one of the elements of {seq}.
1886 
1887     __ push(RegisterSet(R4,R10));
1888     load_one(Rval, from, wordSize, forward);
1889 
1890     switch (bytes_per_count) {
1891       case 2:
1892         min_copy = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
1893         break;
1894       case 1:
1895       {
1896         Label L1, L2, L3;
1897         int min_copy1, min_copy2, min_copy3;
1898 
1899         Label L_loop_finished;
1900 
1901         if (forward) {
1902             __ tbz(to, 0, L2);
1903             __ tbz(to, 1, L1);
1904 
1905             __ BIND(L3);
1906             min_copy3 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 3, bytes_per_count, forward);
1907             __ b(L_loop_finished);
1908 
1909             __ BIND(L1);
1910             min_copy1 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 1, bytes_per_count, forward);
1911             __ b(L_loop_finished);
1912 
1913             __ BIND(L2);
1914             min_copy2 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
1915         } else {
1916             __ tbz(to, 0, L2);
1917             __ tbnz(to, 1, L3);
1918 
1919             __ BIND(L1);
1920             min_copy1 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 1, bytes_per_count, forward);
1921             __ b(L_loop_finished);
1922 
1923              __ BIND(L3);
1924             min_copy3 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 3, bytes_per_count, forward);
1925             __ b(L_loop_finished);
1926 
1927            __ BIND(L2);
1928             min_copy2 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
1929         }
1930 
1931         min_copy = MAX2(MAX2(min_copy1, min_copy2), min_copy3);
1932 
1933         __ BIND(L_loop_finished);
1934 
1935         break;
1936       }
1937       default:
1938         ShouldNotReachHere();
1939         break;
1940     }
1941 
1942     __ pop(RegisterSet(R4,R10));
1943 
1944     return min_copy;
1945   }
1946 
1947 #ifndef PRODUCT
1948   int * get_arraycopy_counter(int bytes_per_count) {
1949     switch (bytes_per_count) {
1950       case 1:
1951         return &SharedRuntime::_jbyte_array_copy_ctr;
1952       case 2:
1953         return &SharedRuntime::_jshort_array_copy_ctr;
1954       case 4:
1955         return &SharedRuntime::_jint_array_copy_ctr;
1956       case 8:
1957         return &SharedRuntime::_jlong_array_copy_ctr;
1958       default:
1959         ShouldNotReachHere();
1960         return NULL;
1961     }
1962   }
1963 #endif // !PRODUCT
1964 
1965   //
1966   //  Generate stub for primitive array copy.  If "aligned" is true, the
1967   //  "from" and "to" addresses are assumed to be heapword aligned.
1968   //
1969   //  If "disjoint" is true, the arrays are assumed to be disjoint; otherwise they may overlap and
1970   //  "nooverlap_target" must be specified as the address to jump to if they don't overlap.
1971   //
1972   // Arguments for generated stub:
1973   //      from:  R0
1974   //      to:    R1
1975   //      count: R2 treated as signed 32-bit int
1976   //
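  // Rough control flow of the generated stub (illustrative pseudo-code only; the
  // emitted code is selected below based on 'aligned', 'disjoint' and bytes_per_count):
  //   if (!disjoint && the arrays do not actually overlap)  jump to 'nooverlap_target';
  //   if (count <= small_copy_limit)  run the simple element-by-element loop;
  //   else {
  //     align 'from' to 8 bytes (copying a few elements);
  //     if ('to' is now word aligned)  run the aligned bulk copy loop;
  //     else                           run the shifted (unaligned dst) copy loop;
  //   }
  //   if ('status')  R0 = 0;   return;
  //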
1977   address generate_primitive_copy(bool aligned, const char * name, bool status, int bytes_per_count, bool disjoint, address nooverlap_target = NULL) {
1978     __ align(CodeEntryAlignment);
1979     StubCodeMark mark(this, "StubRoutines", name);
1980     address start = __ pc();
1981 
1982     const Register from  = R0;   // source array address
1983     const Register to    = R1;   // destination array address
1984     const Register count = R2;   // elements count
1985     const Register tmp1  = R3;
1986     const Register tmp2  = R12;
1987 
1988     if (!aligned)  {
1989       BLOCK_COMMENT("Entry:");
1990     }
1991 
1992     __ zap_high_non_significant_bits(R2);
1993 
1994     if (!disjoint) {
1995       assert (nooverlap_target != NULL, "must be specified for conjoint case");
1996       array_overlap_test(nooverlap_target, exact_log2(bytes_per_count), tmp1, tmp2);
1997     }
1998 
1999     inc_counter_np(*get_arraycopy_counter(bytes_per_count), tmp1, tmp2);
2000 
2001     // Conjoint case: since execution reaches this point, the arrays overlap, so perform a backward copy
2002     // Disjoint case: perform a forward copy
2003     bool forward = disjoint;
2004 
2005 
2006     if (!forward) {
2007       // Set 'from' and 'to' to upper bounds
2008       int log_bytes_per_count = exact_log2(bytes_per_count);
2009       __ add_ptr_scaled_int32(to,   to,   count, log_bytes_per_count);
2010       __ add_ptr_scaled_int32(from, from, count, log_bytes_per_count);
2011     }
2012 
2013     // There are two main copy loop implementations:
2014     //  *) The large and complex one, applicable only to sufficiently large arrays
2015     //  *) The small and simple one, applicable to any array (but not efficient for large arrays).
2016     // Currently the "small" implementation is used if and only if the "large" one cannot be used.
2017     // XXX optim: tune the limit higher ?
2018     // The lower applicability bound of the large implementation is determined by the source
2019     // alignment code, which needs at most 7 bytes, plus the 8 words per iteration of the aligned copy loop.
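    // For example, on 32-bit ARM (wordSize == 4) with bytes_per_count == 2 this gives
    // (8*4 + 7) / 2 == 19, so copies of at most 19 elements take the small loop.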
2020     const int small_copy_limit = (8*wordSize + 7) / bytes_per_count;
2021 
2022     Label L_small_array;
2023     __ cmp_32(count, small_copy_limit);
2024     __ b(L_small_array, le);
2025 
2026     // Otherwise proceed with large implementation.
2027 
2028     bool from_is_aligned = (bytes_per_count >= 8);
2029     if (aligned && forward && (HeapWordSize % 8 == 0)) {
2030         // if 'from' is heapword aligned and HeapWordSize is divisible by 8,
2031         //  then from is aligned by 8
2032         from_is_aligned = true;
2033     }
2034 
2035     int count_required_to_align = from_is_aligned ? 0 : align_src(from, to, count, tmp1, bytes_per_count, forward);
2036     assert (small_copy_limit >= count_required_to_align, "alignment could exhaust count");
2037 
2038     // now 'from' is aligned
2039 
2040     bool to_is_aligned = false;
2041 
2042     if (bytes_per_count >= wordSize) {
2043       // 'to' is aligned by bytes_per_count, so it is aligned by wordSize
2044       to_is_aligned = true;
2045     } else {
2046       if (aligned && (8 % HeapWordSize == 0) && (HeapWordSize % wordSize == 0)) {
2047         // Originally 'from' and 'to' were heapword aligned;
2048         // (from - to) has not changed, so now that 'from' is 8-byte aligned it is also heapword aligned,
2049         //  and therefore 'to' is heapword aligned as well, and thus aligned by wordSize.
2050         to_is_aligned = true;
2051       }
2052     }
2053 
2054     Label L_unaligned_dst;
2055 
2056     if (!to_is_aligned) {
2057       BLOCK_COMMENT("Check dst alignment:");
2058       __ tst(to, wordSize - 1);
2059       __ b(L_unaligned_dst, ne); // 'to' is not aligned
2060     }
2061 
2062     // 'from' and 'to' are properly aligned
2063 
2064     int min_copy;
2065     if (forward) {
2066       min_copy = generate_forward_aligned_copy_loop (from, to, count, bytes_per_count);
2067     } else {
2068       min_copy = generate_backward_aligned_copy_loop(from, to, count, bytes_per_count);
2069     }
2070     assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
2071 
2072     if (status) {
2073       __ mov(R0, 0); // OK
2074     }
2075 
2076     __ ret();
2077 
2078     {
2079       copy_small_array(from, to, count, tmp1, tmp2, bytes_per_count, forward, L_small_array /* entry */);
2080 
2081       if (status) {
2082         __ mov(R0, 0); // OK
2083       }
2084 
2085       __ ret();
2086     }
2087 
2088     if (!to_is_aligned) {
2089       __ BIND(L_unaligned_dst);
2090       int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
2091       assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
2092 
2093       if (status) {
2094         __ mov(R0, 0); // OK
2095       }
2096 
2097       __ ret();
2098     }
2099 
2100     return start;
2101   }
2102 
2103 
2104   // Generates pattern of code to be placed after raw data copying in generate_oop_copy
2105   // Includes return from arraycopy stub.
2106   //
2107   // Arguments:
2108   //     to:       destination pointer after copying.
2109   //               if 'forward' then 'to' == upper bound, else 'to' == beginning of the modified region
2110   //     count:    total number of copied elements, 32-bit int
2111   //
2112   // Blows all volatile registers (R0-R3, Rtemp, LR) and the 'to', 'count', 'tmp' registers.
2113   void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward, DecoratorSet decorators) {
2114     assert_different_registers(to, count, tmp);
2115 
2116     if (forward) {
2117       // 'to' is upper bound of the modified region
2118       // restore initial dst:
2119       __ sub_ptr_scaled_int32(to, to, count, LogBytesPerHeapOop);
2120     }
2121 
2122     // 'to' is the beginning of the region
2123 
2124     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2125     bs->arraycopy_epilogue(_masm, decorators, true, to, count, tmp);
2126 
2127     if (status) {
2128       __ mov(R0, 0); // OK
2129     }
2130 
2131     __ pop(PC);
2132   }
2133 
2134 
2135   //  Generate stub for assign-compatible oop copy.  If "aligned" is true, the
2136   //  "from" and "to" addresses are assumed to be heapword aligned.
2137   //
2138   //  If "disjoint" is true, the arrays are assumed to be disjoint; otherwise they may overlap and
2139   //  "nooverlap_target" must be specified as the address to jump to if they don't overlap.
2140   //
2141   // Arguments for generated stub:
2142   //      from:  R0
2143   //      to:    R1
2144   //      count: R2 treated as signed 32-bit int
2145   //
2146   address generate_oop_copy(bool aligned, const char * name, bool status, bool disjoint, address nooverlap_target = NULL) {
2147     __ align(CodeEntryAlignment);
2148     StubCodeMark mark(this, "StubRoutines", name);
2149     address start = __ pc();
2150 
2151     Register from  = R0;
2152     Register to    = R1;
2153     Register count = R2;
2154     Register tmp1  = R3;
2155     Register tmp2  = R12;
2156 
2157 
2158     if (!aligned) {
2159       BLOCK_COMMENT("Entry:");
2160     }
2161 
2162     __ zap_high_non_significant_bits(R2);
2163 
2164     if (!disjoint) {
2165       assert (nooverlap_target != NULL, "must be specified for conjoint case");
2166       array_overlap_test(nooverlap_target, LogBytesPerHeapOop, tmp1, tmp2);
2167     }
2168 
2169     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, tmp1, tmp2);
2170 
2171     // Conjoint case: since execution reaches this point, the arrays overlap, so perform a backward copy
2172     // Disjoint case: perform a forward copy
2173     bool forward = disjoint;
2174 
2175     const int bytes_per_count = BytesPerHeapOop;
2176     const int log_bytes_per_count = LogBytesPerHeapOop;
2177 
2178     const Register saved_count = LR;
2179     const int callee_saved_regs = 3; // R0-R2
2180 
2181     // LR is used later to save barrier args
2182     __ push(LR);
2183 
2184     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2185     if (disjoint) {
2186       decorators |= ARRAYCOPY_DISJOINT;
2187     }
2188     if (aligned) {
2189       decorators |= ARRAYCOPY_ALIGNED;
2190     }
2191 
2192     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2193     bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
2194 
2195     // save arguments for barrier generation (after the pre barrier)
2196     __ mov(saved_count, count);
2197 
2198     if (!forward) {
2199       __ add_ptr_scaled_int32(to,   to,   count, log_bytes_per_count);
2200       __ add_ptr_scaled_int32(from, from, count, log_bytes_per_count);
2201     }
2202 
2203     // for short arrays, just do single element copy
2204     Label L_small_array;
2205     const int small_copy_limit = (8*wordSize + 7)/bytes_per_count; // XXX optim: tune the limit higher ?
2206     __ cmp_32(count, small_copy_limit);
2207     __ b(L_small_array, le);
2208 
2209     bool from_is_aligned = (bytes_per_count >= 8);
2210     if (aligned && forward && (HeapWordSize % 8 == 0)) {
2211         // if 'from' is heapword aligned and HeapWordSize is divisible by 8,
2212         //  then from is aligned by 8
2213         from_is_aligned = true;
2214     }
2215 
2216     int count_required_to_align = from_is_aligned ? 0 : align_src(from, to, count, tmp1, bytes_per_count, forward);
2217     assert (small_copy_limit >= count_required_to_align, "alignment could exhaust count");
2218 
2219     // now 'from' is aligned
2220 
2221     bool to_is_aligned = false;
2222 
2223     if (bytes_per_count >= wordSize) {
2224       // 'to' is aligned by bytes_per_count, so it is aligned by wordSize
2225       to_is_aligned = true;
2226     } else {
2227       if (aligned && (8 % HeapWordSize == 0) && (HeapWordSize % wordSize == 0)) {
2228         // Originally 'from' and 'to' were heapword aligned;
2229         // (from - to) has not changed, so now that 'from' is 8-byte aligned it is also heapword aligned,
2230         //  and therefore 'to' is heapword aligned as well, and thus aligned by wordSize.
2231         to_is_aligned = true;
2232       }
2233     }
2234 
2235     Label L_unaligned_dst;
2236 
2237     if (!to_is_aligned) {
2238       BLOCK_COMMENT("Check dst alignment:");
2239       __ tst(to, wordSize - 1);
2240       __ b(L_unaligned_dst, ne); // 'to' is not aligned
2241     }
2242 
2243     int min_copy;
2244     if (forward) {
2245       min_copy = generate_forward_aligned_copy_loop(from, to, count, bytes_per_count);
2246     } else {
2247       min_copy = generate_backward_aligned_copy_loop(from, to, count, bytes_per_count);
2248     }
2249     assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
2250 
2251     oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
2252 
2253     {
2254       copy_small_array(from, to, count, tmp1, noreg, bytes_per_count, forward, L_small_array);
2255 
2256       oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
2257     }
2258 
2259     if (!to_is_aligned) {
2260       __ BIND(L_unaligned_dst);
2261       ShouldNotReachHere();
2262       int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
2263       assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
2264 
2265       oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
2266     }
2267 
2268     return start;
2269   }
2270 
2271   //  Generate 'unsafe' array copy stub
2272   //  Though just as safe as the other stubs, it takes an unscaled
2273   //  size_t argument instead of an element count.
2274   //
2275   // Arguments for generated stub:
2276   //      from:  R0
2277   //      to:    R1
2278   //      count: R2 byte count, treated as ssize_t, can be zero
2279   //
2280   // Examines the alignment of the operands and dispatches
2281   // to a long, int, short, or byte copy loop.
2282   //
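  // Roughly (illustrative pseudo-code only; the actual dispatch is emitted below):
  //   bits = from | to | byte_count;
  //   if      ((bits & 7) == 0)  jlong_arraycopy (from, to, byte_count >> 3);
  //   else if ((bits & 3) == 0)  jint_arraycopy  (from, to, byte_count >> 2);
  //   else if ((bits & 1) == 0)  jshort_arraycopy(from, to, byte_count >> 1);
  //   else                       jbyte_arraycopy (from, to, byte_count);
  //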
2283   address generate_unsafe_copy(const char* name) {
2284 
2285     const Register R0_from   = R0;      // source array address
2286     const Register R1_to     = R1;      // destination array address
2287     const Register R2_count  = R2;      // elements count
2288 
2289     const Register R3_bits   = R3;      // test copy of low bits
2290 
2291     __ align(CodeEntryAlignment);
2292     StubCodeMark mark(this, "StubRoutines", name);
2293     address start = __ pc();
2294     const Register tmp = Rtemp;
2295 
2296     // bump this on entry, not on exit:
2297     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R3, tmp);
2298 
2299     __ orr(R3_bits, R0_from, R1_to);
2300     __ orr(R3_bits, R2_count, R3_bits);
2301 
2302     __ tst(R3_bits, BytesPerLong-1);
2303     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerLong), eq);
2304     __ jump(StubRoutines::_jlong_arraycopy, relocInfo::runtime_call_type, tmp, eq);
2305 
2306     __ tst(R3_bits, BytesPerInt-1);
2307     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerInt), eq);
2308     __ jump(StubRoutines::_jint_arraycopy, relocInfo::runtime_call_type, tmp, eq);
2309 
2310     __ tst(R3_bits, BytesPerShort-1);
2311     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerShort), eq);
2312     __ jump(StubRoutines::_jshort_arraycopy, relocInfo::runtime_call_type, tmp, eq);
2313 
2314     __ jump(StubRoutines::_jbyte_arraycopy, relocInfo::runtime_call_type, tmp);
2315     return start;
2316   }
2317 
2318   // Helper for generating a dynamic type check.
2319   // Smashes only the given temp registers.
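  //
  // Equivalent check in rough pseudo-code (comment only, not the emitted code):
  //   if (sub_klass == super_klass)                              goto L_success;
  //   if (*(sub_klass + super_check_offset) == super_klass)      goto L_success;
  //   if (super_check_offset != secondary_super_cache_offset)    fail;
  //   for each k in sub_klass->secondary_supers()
  //     if (k == super_klass) { cache it in sub_klass; goto L_success; }
  //   fail (fall through);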
2320   void generate_type_check(Register sub_klass,
2321                            Register super_check_offset,
2322                            Register super_klass,
2323                            Register tmp1,
2324                            Register tmp2,
2325                            Register tmp3,
2326                            Label& L_success) {
2327     assert_different_registers(sub_klass, super_check_offset, super_klass, tmp1, tmp2, tmp3);
2328 
2329     BLOCK_COMMENT("type_check:");
2330 
2331     // If the pointers are equal, we are done (e.g., String[] elements).
2332 
2333     __ cmp(super_klass, sub_klass);
2334     __ b(L_success, eq); // fast success
2335 
2336 
2337     Label L_loop, L_fail;
2338 
2339     int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
2340 
2341     // Check the supertype display:
2342     __ ldr(tmp1, Address(sub_klass, super_check_offset));
2343     __ cmp(tmp1, super_klass);
2344     __ b(L_success, eq);
2345 
2346     __ cmp(super_check_offset, sc_offset);
2347     __ b(L_fail, ne); // failure
2348 
2349     BLOCK_COMMENT("type_check_slow_path:");
2350 
2351     // a couple of useful fields in sub_klass:
2352     int ss_offset = in_bytes(Klass::secondary_supers_offset());
2353 
2354     // Do a linear scan of the secondary super-klass chain.
2355 
2356 #ifndef PRODUCT
2357     int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
2358     __ inc_counter((address) pst_counter, tmp1, tmp2);
2359 #endif
2360 
2361     Register scan_temp = tmp1;
2362     Register count_temp = tmp2;
2363 
2364     // We will consult the secondary-super array.
2365     __ ldr(scan_temp, Address(sub_klass, ss_offset));
2366 
2367     Register search_key = super_klass;
2368 
2369     // Load the array length.
2370     __ ldr_s32(count_temp, Address(scan_temp, Array<Klass*>::length_offset_in_bytes()));
2371     __ add(scan_temp, scan_temp, Array<Klass*>::base_offset_in_bytes());
2372 
2373     __ add(count_temp, count_temp, 1);
2374 
2375     // Top of search loop
2376     __ bind(L_loop);
2377     // Notes:
2378     //  scan_temp starts at the array elements
2379     //  count_temp is 1+size
2380 
2381     __ subs(count_temp, count_temp, 1);
2382     __ b(L_fail, eq); // not found
2383 
2384     // Load next super to check
2385     // In the array of super classes elements are pointer sized.
2386     int element_size = wordSize;
2387     __ ldr(tmp3, Address(scan_temp, element_size, post_indexed));
2388 
2389     // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
2390     __ cmp(tmp3, search_key);
2391 
2392     // A miss means we are NOT a subtype and need to keep looping
2393     __ b(L_loop, ne);
2394 
2395     // Falling out the bottom means we found a hit; we ARE a subtype
2396 
2397     // Success.  Cache the super we found and proceed in triumph.
2398     __ str(super_klass, Address(sub_klass, sc_offset));
2399 
2400     // Jump to success
2401     __ b(L_success);
2402 
2403     // Fall through on failure!
2404     __ bind(L_fail);
2405   }
2406 
2407   //  Generate stub for checked oop copy.
2408   //
2409   // Arguments for generated stub:
2410   //      from:  R0
2411   //      to:    R1
2412   //      count: R2 treated as signed 32-bit int
2413   //      ckoff: R3 (super_check_offset)
2414   //      ckval: R4 (super_klass)
2415   //      ret:   R0 zero for success; (-1^K) where K is partial transfer count (32-bit)
2416   //
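  // For example, if the element type check fails after 3 elements have already been
  // stored, the stub returns ~3 (i.e. -4) so the caller can finish or report the copy;
  // a fully successful copy returns 0.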
2417   address generate_checkcast_copy(const char * name) {
2418     __ align(CodeEntryAlignment);
2419     StubCodeMark mark(this, "StubRoutines", name);
2420     address start = __ pc();
2421 
2422     const Register from  = R0;  // source array address
2423     const Register to    = R1;  // destination array address
2424     const Register count = R2;  // elements count
2425 
2426     const Register R3_ckoff  = R3;      // super_check_offset
2427     const Register R4_ckval  = R4;      // super_klass
2428 
2429     const int callee_saved_regs = 4; // LR saved differently
2430 
2431     Label load_element, store_element, do_epilogue, fail;
2432 
2433     BLOCK_COMMENT("Entry:");
2434 
2435     __ zap_high_non_significant_bits(R2);
2436 
2437     int pushed = 0;
2438     __ push(LR);
2439     pushed+=1;
2440 
2441     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
2442 
2443     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2444     bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
2445 
2446     const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
2447     __ push(caller_saved_regs);
2448     assert(caller_saved_regs.size() == 6, "check the count");
2449     pushed+=6;
2450 
2451     __ ldr(R4_ckval,Address(SP, wordSize*pushed)); // read the argument that was on the stack
2452 
2453     // Save arguments for barrier generation (after the pre barrier):
2454     // - must be a caller saved register and not LR
2455     // - ARM32: avoid R10 in case RThread is needed
2456     const Register saved_count = altFP_7_11;
2457     __ movs(saved_count, count); // and test count
2458     __ b(load_element,ne);
2459 
2460     // nothing to copy
2461     __ mov(R0, 0);
2462 
2463     __ pop(caller_saved_regs);
2464     __ pop(PC);
2465 
2466     // ======== begin loop ========
2467     // (Loop is rotated; its entry is load_element.)
2468     __ align(OptoLoopAlignment);
2469     __ BIND(store_element);
2470     if (UseCompressedOops) {
2471       __ store_heap_oop(Address(to, BytesPerHeapOop, post_indexed), R5);  // store the oop, changes flags
2472       __ subs_32(count,count,1);
2473     } else {
2474       __ subs_32(count,count,1);
2475       __ str(R5, Address(to, BytesPerHeapOop, post_indexed));             // store the oop
2476     }
2477     __ b(do_epilogue, eq); // count exhausted
2478 
2479     // ======== loop entry is here ========
2480     __ BIND(load_element);
2481     __ load_heap_oop(R5, Address(from, BytesPerHeapOop, post_indexed));  // load the oop
2482     __ cbz(R5, store_element); // NULL
2483 
2484     __ load_klass(R6, R5);
2485 
2486     generate_type_check(R6, R3_ckoff, R4_ckval, /*tmps*/ R12, R8, R9,
2487                         // branch to this on success:
2488                         store_element);
2489     // ======== end loop ========
2490 
2491     // It was a real error; we must depend on the caller to finish the job.
2492     // Register count has number of *remaining* oops, saved_count number of *total* oops.
2493     // Emit GC store barriers for the oops we have copied
2494     // and report their number to the caller (0 or (-1^n))
2495     __ BIND(fail);
2496 
2497     // Note: failure is marked by the fact that count differs from saved_count
2498 
2499     __ BIND(do_epilogue);
2500 
2501     Register copied = R4; // saved
2502     Label L_not_copied;
2503 
2504     __ subs_32(copied, saved_count, count); // copied count (in saved reg)
2505     __ b(L_not_copied, eq); // nothing was copied, skip post barrier
2506     __ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial to value
2507     __ mov(R12, copied); // count arg scratched by post barrier
2508 
2509     bs->arraycopy_epilogue(_masm, decorators, true, to, R12, R3);
2510 
2511     assert_different_registers(R3,R12,LR,copied,saved_count);
2512     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);
2513 
2514     __ BIND(L_not_copied);
2515     __ cmp_32(copied, saved_count); // values preserved in saved registers
2516 
2517     __ mov(R0, 0, eq); // 0 if all copied
2518     __ mvn(R0, copied, ne); // else NOT(copied)
2519     __ pop(caller_saved_regs);
2520     __ pop(PC);
2521 
2522     return start;
2523   }
2524 
2525   // Perform range checks on the proposed arraycopy.
2526   // Kills the two temps, but nothing else.
2527   void arraycopy_range_checks(Register src,     // source array oop
2528                               Register src_pos, // source position (32-bit int)
2529                               Register dst,     // destination array oop
2530                               Register dst_pos, // destination position (32-bit int)
2531                               Register length,  // length of copy (32-bit int)
2532                               Register temp1, Register temp2,
2533                               Label& L_failed) {
2534 
2535     BLOCK_COMMENT("arraycopy_range_checks:");
2536 
2537     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2538 
2539     const Register array_length = temp1;  // scratch
2540     const Register end_pos      = temp2;  // scratch
2541 
2542     __ add_32(end_pos, length, src_pos);  // src_pos + length
2543     __ ldr_s32(array_length, Address(src, arrayOopDesc::length_offset_in_bytes()));
2544     __ cmp_32(end_pos, array_length);
2545     __ b(L_failed, hi);
2546 
2547     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2548     __ add_32(end_pos, length, dst_pos); // dst_pos + length
2549     __ ldr_s32(array_length, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2550     __ cmp_32(end_pos, array_length);
2551     __ b(L_failed, hi);
2552 
2553     BLOCK_COMMENT("arraycopy_range_checks done");
2554   }
2555 
2556   //
2557   //  Generate generic array copy stubs
2558   //
2559   //  Input:
2560   //    R0    -  src oop
2561   //    R1    -  src_pos (32-bit int)
2562   //    R2    -  dst oop
2563   //    R3    -  dst_pos (32-bit int)
2564   //    SP[0] -  element count (32-bit int)
2565   //
2566   //  Output: (32-bit int)
2567   //    R0 ==  0  -  success
2568   //    R0 <   0  -  need to call System.arraycopy
2569   //
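  //  For example, a call equivalent to System.arraycopy(src, 2, dst, 5, 10) reaches this
  //  stub with R0 = src, R1 = 2, R2 = dst, R3 = 5 and the element count 10 at SP[0];
  //  a result < 0 tells the caller it must call System.arraycopy instead.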
2570   address generate_generic_copy(const char *name) {
2571     Label L_failed, L_objArray;
2572 
2573     // Input registers
2574     const Register src      = R0;  // source array oop
2575     const Register src_pos  = R1;  // source position
2576     const Register dst      = R2;  // destination array oop
2577     const Register dst_pos  = R3;  // destination position
2578 
2579     // registers used as temp
2580     const Register R5_src_klass = R5; // source array klass
2581     const Register R6_dst_klass = R6; // destination array klass
2582     const Register R_lh         = altFP_7_11; // layout handler
2583     const Register R8_temp      = R8;
2584 
2585     __ align(CodeEntryAlignment);
2586     StubCodeMark mark(this, "StubRoutines", name);
2587     address start = __ pc();
2588 
2589     __ zap_high_non_significant_bits(R1);
2590     __ zap_high_non_significant_bits(R3);
2591     __ zap_high_non_significant_bits(R4);
2592 
2593     int pushed = 0;
2594     const RegisterSet saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
2595     __ push(saved_regs);
2596     assert(saved_regs.size() == 6, "check the count");
2597     pushed+=6;
2598 
2599     // bump this on entry, not on exit:
2600     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, R5, R12);
2601 
2602     const Register length   = R4;  // elements count
2603     __ ldr(length, Address(SP,4*pushed));
2604 
2605 
2606     //-----------------------------------------------------------------------
2607     // Assembler stubs will be used for this call to arraycopy
2608     // if the following conditions are met:
2609     //
2610     // (1) src and dst must not be null.
2611     // (2) src_pos must not be negative.
2612     // (3) dst_pos must not be negative.
2613     // (4) length  must not be negative.
2614     // (5) src klass and dst klass should be the same and not NULL.
2615     // (6) src and dst should be arrays.
2616     // (7) src_pos + length must not exceed length of src.
2617     // (8) dst_pos + length must not exceed length of dst.
2618     BLOCK_COMMENT("arraycopy initial argument checks");
2619 
2620     //  if (src == NULL) return -1;
2621     __ cbz(src, L_failed);
2622 
2623     //  if (src_pos < 0) return -1;
2624     __ cmp_32(src_pos, 0);
2625     __ b(L_failed, lt);
2626 
2627     //  if (dst == NULL) return -1;
2628     __ cbz(dst, L_failed);
2629 
2630     //  if (dst_pos < 0) return -1;
2631     __ cmp_32(dst_pos, 0);
2632     __ b(L_failed, lt);
2633 
2634     //  if (length < 0) return -1;
2635     __ cmp_32(length, 0);
2636     __ b(L_failed, lt);
2637 
2638     BLOCK_COMMENT("arraycopy argument klass checks");
2639     //  get src->klass()
2640     __ load_klass(R5_src_klass, src);
2641 
2642     // Load layout helper
2643     //
2644     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2645     // 32        30    24            16              8     2                 0
2646     //
2647     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2648     //
2649 
2650     int lh_offset = in_bytes(Klass::layout_helper_offset());
2651     __ ldr_u32(R_lh, Address(R5_src_klass, lh_offset));
2652 
2653     __ load_klass(R6_dst_klass, dst);
2654 
2655     // Handle objArrays completely differently...
2656     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2657     __ mov_slow(R8_temp, objArray_lh);
2658     __ cmp_32(R_lh, R8_temp);
2659     __ b(L_objArray,eq);
2660 
2661     //  if (src->klass() != dst->klass()) return -1;
2662     __ cmp(R5_src_klass, R6_dst_klass);
2663     __ b(L_failed, ne);
2664 
2665     //  if (!src->is_Array()) return -1;
2666     __ cmp_32(R_lh, Klass::_lh_neutral_value); // < 0
2667     __ b(L_failed, ge);
2668 
2669     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2670                            R8_temp, R6_dst_klass, L_failed);
2671 
2672     {
2673       // TypeArrayKlass
2674       //
2675       // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2676       // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2677       //
2678 
2679       const Register R6_offset = R6_dst_klass;    // array offset
2680       const Register R12_elsize = R12;            // log2 element size
2681 
2682       __ logical_shift_right(R6_offset, R_lh, Klass::_lh_header_size_shift);
2683       __ andr(R6_offset, R6_offset, (unsigned int)Klass::_lh_header_size_mask); // array_offset
2684       __ add(src, src, R6_offset);       // src array offset
2685       __ add(dst, dst, R6_offset);       // dst array offset
2686       __ andr(R12_elsize, R_lh, (unsigned int)Klass::_lh_log2_element_size_mask); // log2 element size
2687 
2688       // next registers should be set before the jump to corresponding stub
2689       const Register from     = R0;  // source array address
2690       const Register to       = R1;  // destination array address
2691       const Register count    = R2;  // elements count
2692 
2693       // 'from', 'to', 'count' registers should be set in this order
2694       // since they are the same as 'src', 'src_pos', 'dst'.
2695 
2696 
2697       BLOCK_COMMENT("scale indexes to element size");
2698       __ add(from, src, AsmOperand(src_pos, lsl, R12_elsize));       // src_addr
2699       __ add(to, dst, AsmOperand(dst_pos, lsl, R12_elsize));         // dst_addr
2700 
2701       __ mov(count, length);  // length
2702 
2703       // XXX optim: avoid later push in arraycopy variants ?
2704 
2705       __ pop(saved_regs);
2706 
2707       BLOCK_COMMENT("choose copy loop based on element size");
2708       __ cmp(R12_elsize, 0);
2709       __ b(StubRoutines::_jbyte_arraycopy,eq);
2710 
2711       __ cmp(R12_elsize, LogBytesPerShort);
2712       __ b(StubRoutines::_jshort_arraycopy,eq);
2713 
2714       __ cmp(R12_elsize, LogBytesPerInt);
2715       __ b(StubRoutines::_jint_arraycopy,eq);
2716 
2717       __ b(StubRoutines::_jlong_arraycopy);
2718 
2719     }
2720 
2721     // ObjArrayKlass
2722     __ BIND(L_objArray);
2723     // live at this point:  R5_src_klass, R6_dst_klass, src[_pos], dst[_pos], length
2724 
2725     Label L_plain_copy, L_checkcast_copy;
2726     //  test array classes for subtyping
2727     __ cmp(R5_src_klass, R6_dst_klass);         // usual case is exact equality
2728     __ b(L_checkcast_copy, ne);
2729 
2730     BLOCK_COMMENT("Identically typed arrays");
2731     {
2732       // Identically typed arrays can be copied without element-wise checks.
2733       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2734                              R8_temp, R_lh, L_failed);
2735 
2736       // next registers should be set before the jump to corresponding stub
2737       const Register from     = R0;  // source array address
2738       const Register to       = R1;  // destination array address
2739       const Register count    = R2;  // elements count
2740 
2741       __ add(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2742       __ add(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2743       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerHeapOop);         // src_addr
2744       __ add_ptr_scaled_int32(to, dst, dst_pos, LogBytesPerHeapOop);           // dst_addr
2745       __ BIND(L_plain_copy);
2746       __ mov(count, length);
2747 
2748       __ pop(saved_regs); // XXX optim: avoid later push in oop_arraycopy ?
2749       __ b(StubRoutines::_oop_arraycopy);
2750     }
2751 
2752     {
2753       __ BIND(L_checkcast_copy);
2754       // live at this point:  R5_src_klass, R6_dst_klass
2755 
2756       // Before looking at dst.length, make sure dst is also an objArray.
2757       __ ldr_u32(R8_temp, Address(R6_dst_klass, lh_offset));
2758       __ cmp_32(R_lh, R8_temp);
2759       __ b(L_failed, ne);
2760 
2761       // It is safe to examine both src.length and dst.length.
2762 
2763       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2764                              R8_temp, R_lh, L_failed);
2765 
2766       // next registers should be set before the jump to corresponding stub
2767       const Register from     = R0;  // source array address
2768       const Register to       = R1;  // destination array address
2769       const Register count    = R2;  // elements count
2770 
2771       // Marshal the base address arguments now, freeing registers.
2772       __ add(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2773       __ add(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2774       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerHeapOop);         // src_addr
2775       __ add_ptr_scaled_int32(to, dst, dst_pos, LogBytesPerHeapOop);           // dst_addr
2776 
2777       __ mov(count, length); // length (reloaded)
2778 
2779       Register sco_temp = R3;                   // this register is free now
2780       assert_different_registers(from, to, count, sco_temp,
2781                                  R6_dst_klass, R5_src_klass);
2782 
2783       // Generate the type check.
2784       int sco_offset = in_bytes(Klass::super_check_offset_offset());
2785       __ ldr_u32(sco_temp, Address(R6_dst_klass, sco_offset));
2786       generate_type_check(R5_src_klass, sco_temp, R6_dst_klass,
2787                           R8_temp, R9,
2788                           R12,
2789                           L_plain_copy);
2790 
2791       // Fetch destination element klass from the ObjArrayKlass header.
2792       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2793 
2794       // the checkcast_copy loop needs two extra arguments:
      const Register Rdst_elem_klass = R3;
      __ ldr(Rdst_elem_klass, Address(R6_dst_klass, ek_offset));   // dest elem klass
      __ pop(saved_regs); // XXX optim: avoid later push in oop_arraycopy ?
      __ str(Rdst_elem_klass, Address(SP,0));    // dest elem klass argument
      __ ldr_u32(R3, Address(Rdst_elem_klass, sco_offset));  // sco of elem klass
      __ b(StubRoutines::_checkcast_arraycopy);
    }

    __ BIND(L_failed);

    __ pop(saved_regs);
    __ mvn(R0, 0); // failure: return -1 (~0), i.e. ~(0 elements copied)
    __ ret();

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   R0 = adr
    //   R1 = errValue
    //
    // result:
    //   R0  = *adr or errValue
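    //
    // Typical use from VM code (an illustrative sketch, not the actual call sites):
    //   int v = SafeFetch32(addr, 0xBAD);   // 0xBAD is an arbitrary errValue
    //   if (v == 0xBAD) { /* addr was unreadable, or really contained 0xBAD */ }
    // If the load below faults, the signal handler is expected to resume
    // execution at *continuation_pc, where R1 still holds errValue.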

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into R1; this load may fault.
    *fault_pc = __ pc();

    switch (size) {
      case 4: // int32_t
        __ ldr_s32(R1, Address(R0));
        break;

      case 8: // int64_t
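        // Not reached on this 32-bit port: SafeFetchN is aliased to
        // SafeFetch32 in generate_all() (sizeof(intptr_t) == sizeof(int)).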
        Unimplemented();
        break;

      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ mov(R0, R1);
    __ ret();
  }

  void generate_arraycopy_stubs() {

    // Note:  the disjoint stubs must be generated first, since some of
    //        the conjoint stubs use them.

    bool status = false; // non-failing C2 stubs need not return a status in R0

#ifdef TEST_C2_GENERIC_ARRAYCOPY /* Internal development flag */
    // With this flag, the C2 stubs are tested by generating calls to
    // generic_arraycopy instead of Runtime1::arraycopy

    // Runtime1::arraycopy returns a status in R0 (0 if OK, else ~copied)
    // and the result is tested to see whether the arraycopy stub should
    // be called.

    // When we test arraycopy this way, we must generate extra code in the
    // arraycopy methods callable from C2 generic_arraycopy to set the
    // status to 0 for those that always succeed (calling the slow path stub
    // might lead to errors since the copy has already been performed).

    status = true; // generate a status compatible with C1 calls
#endif

    // these always need a status, in case they are called from generic_arraycopy
    StubRoutines::_jbyte_disjoint_arraycopy  = generate_primitive_copy(false, "jbyte_disjoint_arraycopy",  true, 1, true);
    StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(false, "jshort_disjoint_arraycopy", true, 2, true);
    StubRoutines::_jint_disjoint_arraycopy   = generate_primitive_copy(false, "jint_disjoint_arraycopy",   true, 4, true);
    StubRoutines::_jlong_disjoint_arraycopy  = generate_primitive_copy(false, "jlong_disjoint_arraycopy",  true, 8, true);
    StubRoutines::_oop_disjoint_arraycopy    = generate_oop_copy      (false, "oop_disjoint_arraycopy",    true,    true);

    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = generate_primitive_copy(true, "arrayof_jbyte_disjoint_arraycopy",  status, 1, true);
    StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jshort_disjoint_arraycopy", status, 2, true);
    StubRoutines::_arrayof_jint_disjoint_arraycopy   = generate_primitive_copy(true, "arrayof_jint_disjoint_arraycopy",   status, 4, true);
    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = generate_primitive_copy(true, "arrayof_jlong_disjoint_arraycopy",  status, 8, true);
    StubRoutines::_arrayof_oop_disjoint_arraycopy    = generate_oop_copy      (true, "arrayof_oop_disjoint_arraycopy",    status,    true);

    // these always need a status, in case they are called from generic_arraycopy
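    // Each conjoint stub below is generated with its disjoint counterpart as
    // an extra argument (presumably used as the target when the source and
    // destination ranges are known not to overlap).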
    StubRoutines::_jbyte_arraycopy  = generate_primitive_copy(false, "jbyte_arraycopy",  true, 1, false, StubRoutines::_jbyte_disjoint_arraycopy);
    StubRoutines::_jshort_arraycopy = generate_primitive_copy(false, "jshort_arraycopy", true, 2, false, StubRoutines::_jshort_disjoint_arraycopy);
    StubRoutines::_jint_arraycopy   = generate_primitive_copy(false, "jint_arraycopy",   true, 4, false, StubRoutines::_jint_disjoint_arraycopy);
    StubRoutines::_jlong_arraycopy  = generate_primitive_copy(false, "jlong_arraycopy",  true, 8, false, StubRoutines::_jlong_disjoint_arraycopy);
    StubRoutines::_oop_arraycopy    = generate_oop_copy      (false, "oop_arraycopy",    true,    false, StubRoutines::_oop_disjoint_arraycopy);

    StubRoutines::_arrayof_jbyte_arraycopy    = generate_primitive_copy(true, "arrayof_jbyte_arraycopy",  status, 1, false, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
    StubRoutines::_arrayof_jshort_arraycopy   = generate_primitive_copy(true, "arrayof_jshort_arraycopy", status, 2, false, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
#ifdef _LP64
    // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
    StubRoutines::_arrayof_jint_arraycopy     = generate_primitive_copy(true, "arrayof_jint_arraycopy",   status, 4, false, StubRoutines::_arrayof_jint_disjoint_arraycopy);
#else
    StubRoutines::_arrayof_jint_arraycopy     = StubRoutines::_jint_arraycopy;
#endif
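    // On this 32-bit port a heap oop and a heap word normally have the same
    // size, so the alias branch below is expected to be taken; the generated
    // flavour is kept for configurations where BytesPerHeapOop < HeapWordSize.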
    if (BytesPerHeapOop < HeapWordSize) {
      StubRoutines::_arrayof_oop_arraycopy    = generate_oop_copy      (true, "arrayof_oop_arraycopy",    status,    false, StubRoutines::_arrayof_oop_disjoint_arraycopy);
    } else {
      StubRoutines::_arrayof_oop_arraycopy    = StubRoutines::_oop_arraycopy;
    }
    StubRoutines::_arrayof_jlong_arraycopy    = StubRoutines::_jlong_arraycopy;

    StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
    StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
    StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");


  }

#define COMPILE_CRYPTO
#include "stubRoutinesCrypto_arm.cpp"

 private:

#undef  __
#define __ masm->

  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame.
  address generate_throw_exception(const char* name, address runtime_entry) {
    int insts_size = 128;
    int locs_size  = 32;
    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps;
    int frame_size;
    int frame_complete;

    oop_maps = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

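    // Minimal frame: just the saved FP and LR (2 words).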
    frame_size = 2;
    __ mov(Rexception_pc, LR);
    __ raw_push(FP, LR);

    frame_complete = __ pc() - start;

    // Any extra arguments are already supposed to be in R1 and R2
    __ mov(R0, Rthread);

    int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
    assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
    __ call(runtime_entry);
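    // If set_last_Java_frame did not record a pc (returned -1), use the
    // return address of the runtime call as the pc for the oop map below.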
    if (pc_offset == -1) {
      pc_offset = __ offset();
    }

    // Generate oop map
    OopMap* map = new OopMap(frame_size*VMRegImpl::slots_per_word, 0);
    oop_maps->add_gc_map(pc_offset, map);
    __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call

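    // Restore the caller's frame and let forward_exception_entry dispatch the
    // exception that the runtime call is expected to have made pending.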
    __ raw_pop(FP, LR);
    __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                                      frame_size, oop_maps, false);
    return stub->entry_point();
  }

  //---------------------------------------------------------------------------
  // Initialization

  void generate_initial() {
    // Generates the initial stubs and initializes their entry points

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that exist in all platforms
    // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
    //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
    StubRoutines::_forward_exception_entry      = generate_forward_exception();

    StubRoutines::_call_stub_entry              =
      generate_call_stub(StubRoutines::_call_stub_return_address);
    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry        = generate_catch_exception();

    // stub for throwing stack overflow error used both by interpreter and compiler
    StubRoutines::_throw_StackOverflowError_entry  = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));

    // integer division used both by interpreter and compiler
    StubRoutines::Arm::_idiv_irem_entry = generate_idiv_irem();

    StubRoutines::_atomic_add_entry = generate_atomic_add();
    StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
    StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_load_long_entry = generate_atomic_load_long();
    StubRoutines::_atomic_store_long_entry = generate_atomic_store_long();
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

#ifdef COMPILER2
    // Generate partial_subtype_check first here since its code depends on
    // UseZeroBaseCompressedOops which is defined after heap initialization.
    StubRoutines::Arm::_partial_subtype_check                = generate_partial_subtype_check();
#endif
    // These entry points require SharedInfo::stack0 to be set up in non-core builds
    // and need to be relocatable, so they each fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that are platform specific

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
                                                   &StubRoutines::_safefetch32_continuation_pc);
    assert (sizeof(int) == wordSize, "32-bit architecture");
    StubRoutines::_safefetchN_entry           = StubRoutines::_safefetch32_entry;
    StubRoutines::_safefetchN_fault_pc        = StubRoutines::_safefetch32_fault_pc;
    StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;

#ifdef COMPILE_CRYPTO
    // generate AES intrinsics code
    if (UseAESIntrinsics) {
      aes_init();
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
    }
#endif // COMPILE_CRYPTO
  }


 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}