/*
 * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/macro.hpp"
#include "opto/runtime.hpp"
#include "utilities/align.hpp"


void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent) {
  MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
  mb->init_req(TypeFunc::Control, *ctrl);
  mb->init_req(TypeFunc::Memory, *mem);
  transform_later(mb);
  *ctrl = new ProjNode(mb,TypeFunc::Control);
  transform_later(*ctrl);
  Node* mem_proj = new ProjNode(mb,TypeFunc::Memory);
  transform_later(mem_proj);
  *mem = mem_proj;
}
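
// A typical use, sketched from the call sites later in this file: the
// barrier is spliced into both the control and memory chains, so a caller
// passes its current ctrl/mem by address, e.g.
//
//   insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore);
//
// and keeps using the updated *ctrl and out_mem afterwards.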

Node* PhaseMacroExpand::array_element_address(Node* ary, Node* idx, BasicType elembt) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);
  Node* base =  basic_plus_adr(ary, header);
#ifdef _LP64
  // see comment in GraphKit::array_element_address
  int index_max = max_jint - 1;  // array size is max_jint, index is one less
  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
  idx = transform_later( new ConvI2LNode(idx, lidxtype) );
#endif
  Node* scale = new LShiftXNode(idx, intcon(shift));
  transform_later(scale);
  return basic_plus_adr(ary, base, scale);
}
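
// A worked example of the address computed above (assuming, say, a
// 16-byte array header for T_INT): elembt == T_INT gives shift == 2 and
// header == 16, so the result is  ary + 16 + ((long)idx << 2)  -- the
// usual base + header + index*element_size form, with idx widened to
// 64 bits on _LP64 so the scaled index cannot overflow 32-bit arithmetic.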

Node* PhaseMacroExpand::ConvI2L(Node* offset) {
  return transform_later(new ConvI2LNode(offset));
}

Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem,
                                       const TypeFunc* call_type, address call_addr,
                                       const char* call_name,
                                       const TypePtr* adr_type,
                                       Node* parm0, Node* parm1,
                                       Node* parm2, Node* parm3,
                                       Node* parm4, Node* parm5,
                                       Node* parm6, Node* parm7) {
  Node* call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O    , top());
  call->init_req(TypeFunc::Memory , mem);
  call->init_req(TypeFunc::ReturnAdr, top());
  call->init_req(TypeFunc::FramePtr, top());

  // Hook each parm in order.  Stop looking at the first NULL.
  if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
  if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
  if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
  if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
  if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
    /* close each nested if ===> */  } } } } } } } }
  assert(call->in(call->req()-1) != NULL, "must initialize all parms");

  return call;
}
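
// Unused trailing parms are left NULL and simply not hooked.  A
// representative call (from generate_unchecked_arraycopy below; XTOP,
// defined later in this file, supplies the top() high half of a 64-bit
// value on LP64):
//
//   make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, copyfunc_name,
//                  adr_type, src_start, dest_start, copy_length XTOP);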


//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
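//
// A minimal usage sketch (illustrative; compare generate_negative_guard
// below, which follows exactly this pattern):
//
//   Node* slow = generate_guard(&ctrl, bol, slow_region, PROB_MIN);
//   if (slow != NULL) {
//     ... attach slow-path code to 'slow' ...
//   }
//   // 'ctrl' now continues along the fast path.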
Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob) {
  if ((*ctrl)->is_top()) {
    // Already short circuited.
    return NULL;
  }
  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_igvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  IfNode* iff = new IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN);
  transform_later(iff);

  Node* if_slow = new IfTrueNode(iff);
  transform_later(if_slow);

  if (region != NULL) {
    region->add_req(if_slow);
  }

  Node* if_fast = new IfFalseNode(iff);
  transform_later(if_fast);

  *ctrl = if_fast;

  return if_slow;
}

inline Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
  return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3));
}

void PhaseMacroExpand::generate_negative_guard(Node** ctrl, Node* index, RegionNode* region) {
  if ((*ctrl)->is_top())
    return;                // already stopped
  if (_igvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return;                // index is already adequately typed
  Node* cmp_lt = new CmpINode(index, intcon(0));
  transform_later(cmp_lt);
  Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
  transform_later(bol_lt);
  generate_guard(ctrl, bol_lt, region, PROB_MIN);
}

void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region) {
  if ((*ctrl)->is_top())
    return;                // already stopped
  bool zero_offset = _igvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset) {            // last += offset
    last = new AddINode(last, offset);
    transform_later(last);
  }
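
  // Note that the unsigned compare below (array_length <u last) also
  // catches arithmetic overflow: if offset + subseq_length wraps past
  // max_jint, 'last' goes negative, which as an unsigned value is huge,
  // so the guard conservatively routes such cases to the slow path.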
  Node* cmp_lt = new CmpUNode(array_length, last);
  transform_later(cmp_lt);
  Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
  transform_later(bol_lt);
  generate_guard(ctrl, bol_lt, region, PROB_MIN);
}

Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) {
  if ((*ctrl)->is_top())  return NULL;

  if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_le = new CmpINode(index, intcon(0));
  transform_later(cmp_le);
  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
  Node* bol_le = new BoolNode(cmp_le, le_or_eq);
  transform_later(bol_le);
  Node* is_notp = generate_guard(ctrl, bol_le, NULL, PROB_MIN);

  return is_notp;
}

void PhaseMacroExpand::finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type) {
  transform_later(call);

  *ctrl = new ProjNode(call,TypeFunc::Control);
  transform_later(*ctrl);
  Node* newmem = new ProjNode(call, TypeFunc::Memory);
  transform_later(newmem);

  uint alias_idx = C->get_alias_index(adr_type);
  if (alias_idx != Compile::AliasIdxBot) {
    *mem = MergeMemNode::make(*mem);
    (*mem)->set_memory_at(alias_idx, newmem);
  } else {
    *mem = MergeMemNode::make(newmem);
  }
  transform_later(*mem);
}

address PhaseMacroExpand::basictype2arraycopy(BasicType t,
                                              Node* src_offset,
                                              Node* dest_offset,
                                              bool disjoint_bases,
                                              const char* &name,
                                              bool dest_uninitialized) {
  const TypeInt* src_offset_inttype  = _igvn.find_int_type(src_offset);
  const TypeInt* dest_offset_inttype = _igvn.find_int_type(dest_offset);

  bool aligned = false;
  bool disjoint = disjoint_bases;

  // if the offsets are the same, we can treat the memory regions as
  // disjoint, because either the memory regions are in different arrays,
  // or they are identical (which we can treat as disjoint.)  We can also
  // treat a copy with a destination index less than the source index
  // as disjoint since a low->high copy will work correctly in this case.
  if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
      dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
    // both indices are constants
    int s_offs = src_offset_inttype->get_con();
    int d_offs = dest_offset_inttype->get_con();
    int element_size = type2aelembytes(t);
    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
              ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
    if (s_offs >= d_offs)  disjoint = true;
  } else if (src_offset == dest_offset && src_offset != NULL) {
    // This can occur if the offsets are identical non-constants.
    disjoint = true;
  }

  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
}

#define XTOP LP64_ONLY(COMMA top())
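// On LP64 a jlong (or machine-word) argument occupies two parameter
// slots, and the second slot must be filled with top().  XTOP appends
// that filler, so e.g. "copy_length XTOP" expands to
// "copy_length, top()" on 64-bit and to just "copy_length" on 32-bit.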

// Generate an optimized call to arraycopy.
// Caller must guard against non-arrays.
// Caller must determine a common array basic-type for both arrays.
// Caller must validate offsets against array bounds.
// The slow_region has already collected guard failure paths
// (such as out of bounds length or non-conformable array types).
// The generated code has this shape, in general:
//
//     if (length == 0)  return   // via zero_path
//     slowval = -1
//     if (types unknown) {
//       slowval = call generic copy loop
//       if (slowval == 0)  return  // via checked_path
//     } else if (indexes in bounds) {
//       if ((is object array) && !(array type check)) {
//         slowval = call checked copy loop
//         if (slowval == 0)  return  // via checked_path
//       } else {
//         call bulk copy loop
//         return  // via fast_path
//       }
//     }
//     // adjust params for remaining work:
//     if (slowval != -1) {
//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
//     }
//   slow_region:
//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
//     return  // via slow_call_path
//
// This routine is used from several intrinsics:  System.arraycopy,
// Object.clone (the array subcase), and Arrays.copyOf[Range].
//
Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* alloc,
                                           Node** ctrl, MergeMemNode* mem, Node** io,
                                           const TypePtr* adr_type,
                                           BasicType basic_elem_type,
                                           Node* src,  Node* src_offset,
                                           Node* dest, Node* dest_offset,
                                           Node* copy_length,
                                           bool disjoint_bases,
                                           bool length_never_negative,
                                           RegionNode* slow_region) {
  if (slow_region == NULL) {
    slow_region = new RegionNode(1);
    transform_later(slow_region);
  }

  Node* original_dest      = dest;
  bool  dest_uninitialized = false;

  // See if this is the initialization of a newly-allocated array.
  // If so, we will take responsibility here for initializing it to zero.
  // (Note:  Because tightly_coupled_allocation performs checks on the
  // out-edges of the dest, we need to avoid making derived pointers
  // from it until we have checked its uses.)
  if (ReduceBulkZeroing
      && !(UseTLAB && ZeroTLAB) // pointless if already zeroed
      && basic_elem_type != T_CONFLICT // avoid corner case
      && !src->eqv_uncast(dest)
      && alloc != NULL
      && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
      && alloc->maybe_set_complete(&_igvn)) {
    // "You break it, you buy it."
    InitializeNode* init = alloc->initialization();
    assert(init->is_complete(), "we just did this");
    init->set_complete_with_arraycopy();
    assert(dest->is_CheckCastPP(), "sanity");
    assert(dest->in(0)->in(0) == init, "dest pinned");
    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
    // From this point on, every exit path is responsible for
    // initializing any non-copied parts of the object to zero.
    // Also, if this flag is set we make sure that arraycopy interacts properly
    // with G1, eliding pre-barriers. See CR 6627983.
    dest_uninitialized = true;
  } else {
    // No zeroing elimination here.
    alloc             = NULL;
    //original_dest   = dest;
    //dest_uninitialized = false;
  }

  uint alias_idx = C->get_alias_index(adr_type);

  // Results are placed here:
  enum { fast_path        = 1,  // normal void-returning assembly stub
         checked_path     = 2,  // special assembly stub with cleanup
         slow_call_path   = 3,  // something went wrong; call the VM
         zero_path        = 4,  // bypass when length of copy is zero
         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
         PATH_LIMIT       = 6
  };
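  // Each "return  // via ..." edge in the shape comment above is wired
  // into result_region / result_i_o / result_memory at the corresponding
  // path index declared in the enum.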
  RegionNode* result_region = new RegionNode(PATH_LIMIT);
  PhiNode*    result_i_o    = new PhiNode(result_region, Type::ABIO);
  PhiNode*    result_memory = new PhiNode(result_region, Type::MEMORY, adr_type);
  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
  transform_later(result_region);
  transform_later(result_i_o);
  transform_later(result_memory);

  // The slow_control path:
  Node* slow_control;
  Node* slow_i_o = *io;
  Node* slow_mem = mem->memory_at(alias_idx);
  DEBUG_ONLY(slow_control = (Node*) badAddress);

  // Checked control path:
  Node* checked_control = top();
  Node* checked_mem     = NULL;
  Node* checked_i_o     = NULL;
  Node* checked_value   = NULL;

  if (basic_elem_type == T_CONFLICT) {
    assert(!dest_uninitialized, "");
    Node* cv = generate_generic_arraycopy(ctrl, &mem,
                                          adr_type,
                                          src, src_offset, dest, dest_offset,
                                          copy_length, dest_uninitialized);
    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
    checked_control = *ctrl;
    checked_i_o     = *io;
    checked_mem     = mem->memory_at(alias_idx);
    checked_value   = cv;
    *ctrl = top();
  }

  Node* not_pos = generate_nonpositive_guard(ctrl, copy_length, length_never_negative);
  if (not_pos != NULL) {
    Node* local_ctrl = not_pos, *local_io = *io;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);

    // (6) length must not be negative.
    if (!length_never_negative) {
      generate_negative_guard(&local_ctrl, copy_length, slow_region);
    }

    // copy_length is 0.
    if (dest_uninitialized) {
      assert(!local_ctrl->is_top(), "no ctrl?");
      Node* dest_length = alloc->in(AllocateNode::ALength);
      if (copy_length->eqv_uncast(dest_length)
          || _igvn.find_int_con(dest_length, 1) <= 0) {
        // There is no zeroing to do. No need for a secondary raw memory barrier.
      } else {
        // Clear the whole thing since there are no source elements to copy.
        generate_clear_array(local_ctrl, local_mem,
                             adr_type, dest, basic_elem_type,
                             intcon(0), NULL,
                             alloc->in(AllocateNode::AllocSize));
        // Use a secondary InitializeNode as raw memory barrier.
        // Currently it is needed only on this path since other
        // paths have stub or runtime calls as raw memory barriers.
        MemBarNode* mb = MemBarNode::make(C, Op_Initialize,
                                          Compile::AliasIdxRaw,
                                          top());
        transform_later(mb);
        mb->set_req(TypeFunc::Control,local_ctrl);
        mb->set_req(TypeFunc::Memory, local_mem->memory_at(Compile::AliasIdxRaw));
        local_ctrl = transform_later(new ProjNode(mb, TypeFunc::Control));
        local_mem->set_memory_at(Compile::AliasIdxRaw, transform_later(new ProjNode(mb, TypeFunc::Memory)));

        InitializeNode* init = mb->as_Initialize();
        init->set_complete(&_igvn);  // (there is no corresponding AllocateNode)
      }
    }

    // Present the results of the fast call.
    result_region->init_req(zero_path, local_ctrl);
    result_i_o   ->init_req(zero_path, local_io);
    result_memory->init_req(zero_path, local_mem->memory_at(alias_idx));
  }

  if (!(*ctrl)->is_top() && dest_uninitialized) {
    // We have to initialize the *uncopied* part of the array to zero.
    // The copy destination is the slice dest[off..off+len].  The other slices
    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
    Node* dest_length = alloc->in(AllocateNode::ALength);
    Node* dest_tail   = transform_later( new AddINode(dest_offset, copy_length));

    // If there is a head section that needs zeroing, do it now.
    if (_igvn.find_int_con(dest_offset, -1) != 0) {
      generate_clear_array(*ctrl, mem,
                           adr_type, dest, basic_elem_type,
                           intcon(0), dest_offset,
                           NULL);
    }

    // Next, perform a dynamic check on the tail length.
    // It is often zero, and we can win big if we prove this.
    // There are two wins:  Avoid generating the ClearArray
    // with its attendant messy index arithmetic, and upgrade
    // the copy to a more hardware-friendly word size of 64 bits.
    Node* tail_ctl = NULL;
    if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) {
      Node* cmp_lt   = transform_later( new CmpINode(dest_tail, dest_length) );
      Node* bol_lt   = transform_later( new BoolNode(cmp_lt, BoolTest::lt) );
      tail_ctl = generate_slow_guard(ctrl, bol_lt, NULL);
      assert(tail_ctl != NULL || !(*ctrl)->is_top(), "must be an outcome");
    }

    // At this point, let's assume there is no tail.
    if (!(*ctrl)->is_top() && alloc != NULL && basic_elem_type != T_OBJECT) {
      // There is no tail.  Try an upgrade to a 64-bit copy.
      bool didit = false;
      {
        Node* local_ctrl = *ctrl, *local_io = *io;
        MergeMemNode* local_mem = MergeMemNode::make(mem);
        transform_later(local_mem);

        didit = generate_block_arraycopy(&local_ctrl, &local_mem, local_io,
                                         adr_type, basic_elem_type, alloc,
                                         src, src_offset, dest, dest_offset,
                                         dest_size, dest_uninitialized);
        if (didit) {
          // Present the results of the block-copying fast call.
          result_region->init_req(bcopy_path, local_ctrl);
          result_i_o   ->init_req(bcopy_path, local_io);
          result_memory->init_req(bcopy_path, local_mem->memory_at(alias_idx));
        }
      }
      if (didit) {
        *ctrl = top();     // no regular fast path
      }
    }

    // Clear the tail, if any.
    if (tail_ctl != NULL) {
      Node* notail_ctl = (*ctrl)->is_top() ? NULL : *ctrl;
      *ctrl = tail_ctl;
      if (notail_ctl == NULL) {
        generate_clear_array(*ctrl, mem,
                             adr_type, dest, basic_elem_type,
                             dest_tail, NULL,
                             dest_size);
      } else {
        // Make a local merge.
        Node* done_ctl = transform_later(new RegionNode(3));
        Node* done_mem = transform_later(new PhiNode(done_ctl, Type::MEMORY, adr_type));
        done_ctl->init_req(1, notail_ctl);
        done_mem->init_req(1, mem->memory_at(alias_idx));
        generate_clear_array(*ctrl, mem,
                             adr_type, dest, basic_elem_type,
                             dest_tail, NULL,
                             dest_size);
        done_ctl->init_req(2, *ctrl);
        done_mem->init_req(2, mem->memory_at(alias_idx));
        *ctrl = done_ctl;
        mem->set_memory_at(alias_idx, done_mem);
      }
    }
  }

  BasicType copy_type = basic_elem_type;
  assert(basic_elem_type != T_ARRAY, "caller must fix this");
  if (!(*ctrl)->is_top() && copy_type == T_OBJECT) {
    // If src and dest have compatible element types, we can copy bits.
    // Types S[] and D[] are compatible if D is a supertype of S.
    //
    // If they are not, we will use checked_oop_disjoint_arraycopy,
    // which performs a fast optimistic per-oop check, and backs off
    // further to JVM_ArrayCopy on the first per-oop check that fails.
    // (Actually, we don't move raw bits only; the GC requires card marks.)

    // We don't need a subtype check for validated copies and Object[].clone()
    bool skip_subtype_check = ac->is_arraycopy_validated() || ac->is_copyof_validated() ||
                              ac->is_copyofrange_validated() || ac->is_cloneoop();
    if (!skip_subtype_check) {
      // Get the klass* for both src and dest
      Node* src_klass  = ac->in(ArrayCopyNode::SrcKlass);
      Node* dest_klass = ac->in(ArrayCopyNode::DestKlass);

      assert(src_klass != NULL && dest_klass != NULL, "should have klasses");

      // Generate the subtype check.
      // This might fold up statically, or then again it might not.
      //
      // Non-static example:  Copying List<String>.elements to a new String[].
      // The backing store for a List<String> is always an Object[],
      // but its elements are always type String, if the generic types
      // are correct at the source level.
      //
      // Test S[] against D[], not S against D, because (probably)
      // the secondary supertype cache is less busy for S[] than S.
      // This usually only matters when D is an interface.
      Node* not_subtype_ctrl = Phase::gen_subtype_check(src_klass, dest_klass, ctrl, mem, &_igvn);
      // Plug failing path into checked_oop_disjoint_arraycopy
      if (not_subtype_ctrl != top()) {
        Node* local_ctrl = not_subtype_ctrl;
        MergeMemNode* local_mem = MergeMemNode::make(mem);
        transform_later(local_mem);

        // (At this point we can assume disjoint_bases, since types differ.)
        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        Node* p1 = basic_plus_adr(dest_klass, ek_offset);
        Node* n1 = LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
        Node* dest_elem_klass = transform_later(n1);
        Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
                                                adr_type,
                                                dest_elem_klass,
                                                src, src_offset, dest, dest_offset,
                                                ConvI2X(copy_length), dest_uninitialized);
        if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
        checked_control = local_ctrl;
        checked_i_o     = *io;
        checked_mem     = local_mem->memory_at(alias_idx);
        checked_value   = cv;
      }
    }
    // At this point we know we do not need type checks on oop stores.

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (!bs->array_copy_requires_gc_barriers(alloc != NULL, copy_type, false, BarrierSetC2::Expansion)) {
      // If we do not need gc barriers, copy using the jint or jlong stub.
      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
             "sizes agree");
    }
  }

  if (!(*ctrl)->is_top()) {
    // Generate the fast path, if possible.
    Node* local_ctrl = *ctrl;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);

    generate_unchecked_arraycopy(&local_ctrl, &local_mem,
                                 adr_type, copy_type, disjoint_bases,
                                 src, src_offset, dest, dest_offset,
                                 ConvI2X(copy_length), dest_uninitialized);

    // Present the results of the fast call.
    result_region->init_req(fast_path, local_ctrl);
    result_i_o   ->init_req(fast_path, *io);
    result_memory->init_req(fast_path, local_mem->memory_at(alias_idx));
  }

  // Here are all the slow paths up to this point, in one bundle:
  assert(slow_region != NULL, "allocated on entry");
  slow_control = slow_region;
  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);

  *ctrl = checked_control;
  if (!(*ctrl)->is_top()) {
    // Clean up after the checked call.
    // The returned value is either 0 or -1^K,
    // where K = number of partially transferred array elements.
    Node* cmp = new CmpINode(checked_value, intcon(0));
    transform_later(cmp);
    Node* bol = new BoolNode(cmp, BoolTest::eq);
    transform_later(bol);
    IfNode* iff = new IfNode(*ctrl, bol, PROB_MAX, COUNT_UNKNOWN);
    transform_later(iff);

    // If it is 0, we are done, so transfer to the end.
    Node* checks_done = new IfTrueNode(iff);
    transform_later(checks_done);
    result_region->init_req(checked_path, checks_done);
    result_i_o   ->init_req(checked_path, checked_i_o);
    result_memory->init_req(checked_path, checked_mem);

    // If it is not zero, merge into the slow call.
    *ctrl = new IfFalseNode(iff);
    transform_later(*ctrl);
    RegionNode* slow_reg2 = new RegionNode(3);
    PhiNode*    slow_i_o2 = new PhiNode(slow_reg2, Type::ABIO);
    PhiNode*    slow_mem2 = new PhiNode(slow_reg2, Type::MEMORY, adr_type);
    transform_later(slow_reg2);
    transform_later(slow_i_o2);
    transform_later(slow_mem2);
    slow_reg2  ->init_req(1, slow_control);
    slow_i_o2  ->init_req(1, slow_i_o);
    slow_mem2  ->init_req(1, slow_mem);
    slow_reg2  ->init_req(2, *ctrl);
    slow_i_o2  ->init_req(2, checked_i_o);
    slow_mem2  ->init_req(2, checked_mem);

    slow_control = slow_reg2;
    slow_i_o     = slow_i_o2;
    slow_mem     = slow_mem2;

    if (alloc != NULL) {
      // We'll restart from the very beginning, after zeroing the whole thing.
      // This can cause double writes, but that's OK since dest is brand new.
      // So we ignore the low 31 bits of the value returned from the stub.
    } else {
      // We must continue the copy exactly where it failed, or else
      // another thread might see the wrong number of writes to dest.
      Node* checked_offset = new XorINode(checked_value, intcon(-1));
      Node* slow_offset    = new PhiNode(slow_reg2, TypeInt::INT);
      transform_later(checked_offset);
      transform_later(slow_offset);
      slow_offset->init_req(1, intcon(0));
      slow_offset->init_req(2, checked_offset);

      // Adjust the arguments by the conditionally incoming offset.
      Node* src_off_plus  = new AddINode(src_offset,  slow_offset);
      transform_later(src_off_plus);
      Node* dest_off_plus = new AddINode(dest_offset, slow_offset);
      transform_later(dest_off_plus);
      Node* length_minus  = new SubINode(copy_length, slow_offset);
      transform_later(length_minus);

      // Tweak the node variables to adjust the code produced below:
      src_offset  = src_off_plus;
      dest_offset = dest_off_plus;
      copy_length = length_minus;
    }
  }
  *ctrl = slow_control;
  if (!(*ctrl)->is_top()) {
    Node* local_ctrl = *ctrl, *local_io = slow_i_o;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);

    // Generate the slow path, if needed.
    local_mem->set_memory_at(alias_idx, slow_mem);

    if (dest_uninitialized) {
      generate_clear_array(local_ctrl, local_mem,
                           adr_type, dest, basic_elem_type,
                           intcon(0), NULL,
                           alloc->in(AllocateNode::AllocSize));
    }

    local_mem = generate_slow_arraycopy(ac,
                                        &local_ctrl, local_mem, &local_io,
                                        adr_type,
                                        src, src_offset, dest, dest_offset,
                                        copy_length, /*dest_uninitialized*/false);

    result_region->init_req(slow_call_path, local_ctrl);
    result_i_o   ->init_req(slow_call_path, local_io);
    result_memory->init_req(slow_call_path, local_mem->memory_at(alias_idx));
  } else {
    ShouldNotReachHere(); // no call to generate_slow_arraycopy:
                          // projections were not extracted
  }

  // Remove unused edges.
  for (uint i = 1; i < result_region->req(); i++) {
    if (result_region->in(i) == NULL) {
      result_region->init_req(i, top());
    }
  }

  // Finished; return the combined state.
  *ctrl = result_region;
  *io = result_i_o;
  mem->set_memory_at(alias_idx, result_memory);

  // mem no longer guaranteed to stay a MergeMemNode
  Node* out_mem = mem;
  DEBUG_ONLY(mem = NULL);

  // The memory edges above are precise in order to model effects around
  // array copies accurately to allow value numbering of field loads around
  // arraycopy.  Such field loads, both before and after, are common in Java
  // collections and similar classes involving header/array data structures.
  //
  // But with a low number of registers, or when some registers are used or
  // killed by the arraycopy calls, this causes register spilling on the
  // stack.  See 6544710.  The next memory barrier is added to avoid it.
  // If the arraycopy can be optimized away (which it can, sometimes) then
  // we can manually remove the membar also.
  //
  // Do not let reads from the cloned object float above the arraycopy.
  if (alloc != NULL && !alloc->initialization()->does_not_escape()) {
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore);
  } else if (InsertMemBarAfterArraycopy) {
    insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder);
  }

  _igvn.replace_node(_memproj_fallthrough, out_mem);
  _igvn.replace_node(_ioproj_fallthrough, *io);
  _igvn.replace_node(_fallthroughcatchproj, *ctrl);

#ifdef ASSERT
  const TypeOopPtr* dest_t = _igvn.type(dest)->is_oopptr();
  if (dest_t->is_known_instance()) {
    ArrayCopyNode* ac = NULL;
    assert(ArrayCopyNode::may_modify(dest_t, (*ctrl)->in(0)->as_MemBar(), &_igvn, ac), "dependency on arraycopy lost");
    assert(ac == NULL, "no arraycopy anymore");
  }
#endif

  return out_mem;
}

// Helper for initialization of arrays, creating a ClearArray.
// It writes zero bits in [start..end), within the body of an array object.
// The memory effects are all chained onto the 'adr_type' alias category.
//
// Since the object is otherwise uninitialized, we are free
// to put a little "slop" around the edges of the cleared area,
// as long as it does not go back into the array's header,
// or beyond the array end within the heap.
//
// The lower edge can be rounded down to the nearest jint and the
// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
//
// Arguments:
//   adr_type           memory slice where writes are generated
//   dest               oop of the destination array
//   basic_elem_type    element type of the destination
//   slice_idx          array index of first element to store
//   slice_len          number of elements to store (or NULL)
//   dest_size          total size in bytes of the array object
//
// Exactly one of slice_len or dest_size must be non-NULL.
// If dest_size is non-NULL, zeroing extends to the end of the object.
// If slice_len is non-NULL, the slice_idx value must be a constant.
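//
// A worked example of the lower-edge rounding, assuming a 16-byte array
// header (abase == 16) and basic_elem_type == T_SHORT (scale == 1): with
// a constant slice_idx of 5, clear_low == 2 below, so
//   start_con = (16 + (5 << 1)) & ~2 = 26 & ~2 = 24,
// i.e. the start is rounded down to the jint boundary at byte 24,
// harmlessly re-zeroing the two bytes of element 4 as permitted "slop".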
void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
                                            const TypePtr* adr_type,
                                            Node* dest,
                                            BasicType basic_elem_type,
                                            Node* slice_idx,
                                            Node* slice_len,
                                            Node* dest_size) {
  // one or the other but not both of slice_len and dest_size:
  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
  if (slice_len == NULL)  slice_len = top();
  if (dest_size == NULL)  dest_size = top();

  uint alias_idx = C->get_alias_index(adr_type);

  // operate on this memory slice:
  Node* mem = merge_mem->memory_at(alias_idx); // memory slice to operate on

  // scaling and rounding of indexes:
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
  int clear_low = (-1 << scale) & (BytesPerInt  - 1);
  int bump_bit  = (-1 << scale) & BytesPerInt;

  // determine constant starts and ends
  const intptr_t BIG_NEG = -128;
  assert(BIG_NEG + 2*abase < 0, "neg enough");
  intptr_t slice_idx_con = (intptr_t) _igvn.find_int_con(slice_idx, BIG_NEG);
  intptr_t slice_len_con = (intptr_t) _igvn.find_int_con(slice_len, BIG_NEG);
  if (slice_len_con == 0) {
    return;                     // nothing to do here
  }
  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
  intptr_t end_con   = _igvn.find_intptr_t_con(dest_size, -1);
  if (slice_idx_con >= 0 && slice_len_con >= 0) {
    assert(end_con < 0, "not two cons");
    end_con = align_up(abase + ((slice_idx_con + slice_len_con) << scale),
                       BytesPerLong);
  }

  if (start_con >= 0 && end_con >= 0) {
    // Constant start and end.  Simple.
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start_con, end_con, &_igvn);
  } else if (start_con >= 0 && dest_size != top()) {
    // Constant start, pre-rounded end after the tail of the array.
    Node* end = dest_size;
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start_con, end, &_igvn);
  } else if (start_con >= 0 && slice_len != top()) {
    // Constant start, non-constant end.  End needs rounding up.
    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
    intptr_t end_base  = abase + (slice_idx_con << scale);
    int      end_round = (-1 << scale) & (BytesPerLong  - 1);
    Node*    end       = ConvI2X(slice_len);
    if (scale != 0)
      end = transform_later(new LShiftXNode(end, intcon(scale) ));
    end_base += end_round;
    end = transform_later(new AddXNode(end, MakeConX(end_base)) );
    end = transform_later(new AndXNode(end, MakeConX(~end_round)) );
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start_con, end, &_igvn);
  } else if (start_con < 0 && dest_size != top()) {
    // Non-constant start, pre-rounded end after the tail of the array.
    // This is almost certainly a "round-to-end" operation.
    Node* start = slice_idx;
    start = ConvI2X(start);
    if (scale != 0)
      start = transform_later(new LShiftXNode( start, intcon(scale) ));
    start = transform_later(new AddXNode(start, MakeConX(abase)) );
    if ((bump_bit | clear_low) != 0) {
      int to_clear = (bump_bit | clear_low);
      // Align up mod 8, then store a jint zero unconditionally
      // just before the mod-8 boundary.
      if (((abase + bump_bit) & ~to_clear) - bump_bit
          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
        bump_bit = 0;
        assert((abase & to_clear) == 0, "array base must be long-aligned");
      } else {
        // Bump 'start' up to (or past) the next jint boundary:
        start = transform_later( new AddXNode(start, MakeConX(bump_bit)) );
        assert((abase & clear_low) == 0, "array base must be int-aligned");
      }
      // Round bumped 'start' down to jlong boundary in body of array.
      start = transform_later(new AndXNode(start, MakeConX(~to_clear)) );
      if (bump_bit != 0) {
        // Store a zero to the immediately preceding jint:
        Node* x1 = transform_later(new AddXNode(start, MakeConX(-bump_bit)) );
        Node* p1 = basic_plus_adr(dest, x1);
        mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
        mem = transform_later(mem);
      }
    }
    Node* end = dest_size; // pre-rounded
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start, end, &_igvn);
  } else {
    // Non-constant start, unrounded non-constant end.
    // (Nobody zeroes a random midsection of an array using this routine.)
    ShouldNotReachHere();       // fix caller
  }

  // Done.
  merge_mem->set_memory_at(alias_idx, mem);
}

bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
                                                const TypePtr* adr_type,
                                                BasicType basic_elem_type,
                                                AllocateNode* alloc,
                                                Node* src,  Node* src_offset,
                                                Node* dest, Node* dest_offset,
                                                Node* dest_size, bool dest_uninitialized) {
  // See if there is an advantage from block transfer.
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  if (scale >= LogBytesPerLong)
    return false;               // it is already a block transfer

  // Look at the alignment of the starting offsets.
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);

  intptr_t src_off_con  = (intptr_t) _igvn.find_int_con(src_offset, -1);
  intptr_t dest_off_con = (intptr_t) _igvn.find_int_con(dest_offset, -1);
  if (src_off_con < 0 || dest_off_con < 0) {
    // At present, we can only understand constants.
    return false;
  }

  intptr_t src_off  = abase + (src_off_con  << scale);
  intptr_t dest_off = abase + (dest_off_con << scale);

  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
    // Non-aligned; too bad.
    // One more chance:  Pick off an initial 32-bit word.
    // This is a common case, since abase can be odd mod 8.
    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
      Node* sptr = basic_plus_adr(src,  src_off);
      Node* dptr = basic_plus_adr(dest, dest_off);
      const TypePtr* s_adr_type = _igvn.type(sptr)->is_ptr();
      assert(s_adr_type->isa_aryptr(), "impossible slice");
      uint s_alias_idx = C->get_alias_index(s_adr_type);
      uint d_alias_idx = C->get_alias_index(adr_type);
      bool is_mismatched = (basic_elem_type != T_INT);
      Node* sval = transform_later(
          LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(s_alias_idx), sptr, s_adr_type,
                         TypeInt::INT, T_INT, MemNode::unordered, LoadNode::DependsOnlyOnTest,
                         false /*unaligned*/, is_mismatched));
      Node* st = transform_later(
          StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(d_alias_idx), dptr, adr_type,
                          sval, T_INT, MemNode::unordered));
      if (is_mismatched) {
        st->as_Store()->set_mismatched_access();
      }
      (*mem)->set_memory_at(d_alias_idx, st);
      src_off += BytesPerInt;
      dest_off += BytesPerInt;
    } else {
      return false;
    }
  }
  assert(src_off % BytesPerLong == 0, "");
  assert(dest_off % BytesPerLong == 0, "");
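
  // A concrete trace of the pick-off above, assuming abase == 20 (i.e.
  // 4 mod 8) and T_INT elements (scale == 2), with src_off_con == 0 and
  // dest_off_con == 2: src_off == 20 and dest_off == 28, both 4 mod 8,
  // so a single jint is copied first and the offsets advance to 24 and
  // 32, which are 8-aligned and ready for the long-sized copy below.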

  // Do this copy by giant steps.
  Node* sptr  = basic_plus_adr(src,  src_off);
  Node* dptr  = basic_plus_adr(dest, dest_off);
  Node* countx = dest_size;
  countx = transform_later(new SubXNode(countx, MakeConX(dest_off)));
  countx = transform_later(new URShiftXNode(countx, intcon(LogBytesPerLong)));

  bool disjoint_bases = true;   // since alloc != NULL
  generate_unchecked_arraycopy(ctrl, mem,
                               adr_type, T_LONG, disjoint_bases,
                               sptr, NULL, dptr, NULL, countx, dest_uninitialized);

  return true;
}

// Helper function; generates code for the slow case.
// We make a call to a runtime method which emulates the native method,
// but without the native wrapper overhead.
MergeMemNode* PhaseMacroExpand::generate_slow_arraycopy(ArrayCopyNode *ac,
                                                        Node** ctrl, Node* mem, Node** io,
                                                        const TypePtr* adr_type,
                                                        Node* src,  Node* src_offset,
                                                        Node* dest, Node* dest_offset,
                                                        Node* copy_length, bool dest_uninitialized) {
  assert(!dest_uninitialized, "Invariant");

  const TypeFunc* call_type = OptoRuntime::slow_arraycopy_Type();
  CallNode* call = new CallStaticJavaNode(call_type, OptoRuntime::slow_arraycopy_Java(),
                                          "slow_arraycopy",
                                          ac->jvms()->bci(), TypePtr::BOTTOM);

  call->init_req(TypeFunc::Control, *ctrl);
  call->init_req(TypeFunc::I_O    , *io);
  call->init_req(TypeFunc::Memory , mem);
  call->init_req(TypeFunc::ReturnAdr, top());
  call->init_req(TypeFunc::FramePtr, top());
  call->init_req(TypeFunc::Parms+0, src);
  call->init_req(TypeFunc::Parms+1, src_offset);
  call->init_req(TypeFunc::Parms+2, dest);
  call->init_req(TypeFunc::Parms+3, dest_offset);
  call->init_req(TypeFunc::Parms+4, copy_length);
  copy_call_debug_info(ac, call);

  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.replace_node(ac, call);
  transform_later(call);

  extract_call_projections(call);
  *ctrl = _fallthroughcatchproj->clone();
  transform_later(*ctrl);

  Node* m = _memproj_fallthrough->clone();
  transform_later(m);

  uint alias_idx = C->get_alias_index(adr_type);
  MergeMemNode* out_mem;
  if (alias_idx != Compile::AliasIdxBot) {
    out_mem = MergeMemNode::make(mem);
    out_mem->set_memory_at(alias_idx, m);
  } else {
    out_mem = MergeMemNode::make(m);
  }
  transform_later(out_mem);

  *io = _ioproj_fallthrough->clone();
  transform_later(*io);

  return out_mem;
}

// Helper function; generates code for cases requiring runtime checks.
Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                     const TypePtr* adr_type,
                                                     Node* dest_elem_klass,
                                                     Node* src,  Node* src_offset,
                                                     Node* dest, Node* dest_offset,
                                                     Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top())  return NULL;

  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
    return NULL;
  }

  // Pick out the parameters required to perform a store-check
  // for the target array.  This is an optimistic check.  It will
  // look in each non-null element's class, at the desired klass's
  // super_check_offset, for the desired klass.
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
  Node* n3 = new LoadINode(NULL, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
  Node* check_offset = ConvI2X(transform_later(n3));
  Node* check_value  = dest_elem_klass;
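
  // Roughly, per non-null element o of src the stub then performs
  // (illustrative pseudocode, not the stub's actual instruction sequence):
  //
  //   if (*(o->klass() + check_offset) != check_value)  bail out,
  //
  // returning ~K where K elements were already copied -- the 0 / -1^K
  // convention that generate_arraycopy cleans up after.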

  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);

  const TypeFunc* call_type = OptoRuntime::checkcast_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "checkcast_arraycopy", adr_type,
                              src_start, dest_start, copy_length XTOP, check_offset XTOP, check_value);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  Node* proj =  new ProjNode(call, TypeFunc::Parms);
  transform_later(proj);

  return proj;
}

// Helper function; generates code for cases requiring runtime checks.
Node* PhaseMacroExpand::generate_generic_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                   const TypePtr* adr_type,
                                                   Node* src,  Node* src_offset,
                                                   Node* dest, Node* dest_offset,
                                                   Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top()) return NULL;
  assert(!dest_uninitialized, "Invariant");

  address copyfunc_addr = StubRoutines::generic_arraycopy();
  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
    return NULL;
  }

  const TypeFunc* call_type = OptoRuntime::generic_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "generic_arraycopy", adr_type,
                              src, src_offset, dest, dest_offset, copy_length);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  Node* proj =  new ProjNode(call, TypeFunc::Parms);
  transform_later(proj);

  return proj;
}

// Helper function; generates the fast out-of-line call to an arraycopy stub.
void PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                    const TypePtr* adr_type,
                                                    BasicType basic_elem_type,
                                                    bool disjoint_bases,
                                                    Node* src,  Node* src_offset,
                                                    Node* dest, Node* dest_offset,
                                                    Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top()) return;

  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != NULL || dest_offset != NULL) {
    src_start =  array_element_address(src, src_offset, basic_elem_type);
    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
  }

  // Figure out which arraycopy runtime method to call.
  const char* copyfunc_name = "arraycopy";
  address     copyfunc_addr =
      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
                          disjoint_bases, copyfunc_name, dest_uninitialized);

  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, copyfunc_name, adr_type,
                              src_start, dest_start, copy_length XTOP);

  finish_arraycopy_call(call, ctrl, mem, adr_type);
}

void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* io = ac->in(TypeFunc::I_O);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);
  MergeMemNode* merge_mem = NULL;

  if (ac->is_clonebasic()) {
    assert (src_offset == NULL && dest_offset == NULL, "for clone offsets should be null");
    Node* mem = ac->in(TypeFunc::Memory);
    const char* copyfunc_name = "arraycopy";
    address     copyfunc_addr =
      basictype2arraycopy(T_LONG, NULL, NULL,
                          true, copyfunc_name, true);

    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
    const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();

    Node* call = make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, src, dest, length XTOP);
    transform_later(call);

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bs->clone_barrier_at_expansion(ac, call, _igvn);

    return;
  } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_cloneoop()) {
    Node* mem = ac->in(TypeFunc::Memory);
    merge_mem = MergeMemNode::make(mem);
    transform_later(merge_mem);

    RegionNode* slow_region = new RegionNode(1);
    transform_later(slow_region);

    AllocateArrayNode* alloc = NULL;
    if (ac->is_alloc_tightly_coupled()) {
      alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
      assert(alloc != NULL, "expect alloc");
    }

    const TypePtr* adr_type = _igvn.type(dest)->is_oopptr()->add_offset(Type::OffsetBot);
    if (ac->_dest_type != TypeOopPtr::BOTTOM) {
      adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
    }
    generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
                       adr_type, T_OBJECT,
                       src, src_offset, dest, dest_offset, length,
                       true, !ac->is_copyofrange());

    return;
  }

  AllocateArrayNode* alloc = NULL;
  if (ac->is_alloc_tightly_coupled()) {
    alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
    assert(alloc != NULL, "expect alloc");
  }

  assert(ac->is_arraycopy() || ac->is_arraycopy_validated(), "should be an arraycopy");

  // Compile time checks.  If any of these checks cannot be verified at compile time,
  // we do not make a fast path for this call.  Instead, we let the call remain as it
  // is.  The checks we choose to mandate at compile time are:
  //
  // (1) src and dest are arrays.
  const Type* src_type = src->Value(&_igvn);
  const Type* dest_type = dest->Value(&_igvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();

  BasicType src_elem = T_CONFLICT;
  BasicType dest_elem = T_CONFLICT;

  if (top_dest != NULL && top_dest->klass() != NULL) {
    dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
  }
  if (top_src != NULL && top_src->klass() != NULL) {
    src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
  }
  if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
  if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;

  if (ac->is_arraycopy_validated() &&
      dest_elem != T_CONFLICT &&
      src_elem == T_CONFLICT) {
    src_elem = dest_elem;
  }

  if (src_elem == T_CONFLICT || dest_elem == T_CONFLICT) {
    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the source float below the arraycopy.
    {
      Node* mem = ac->in(TypeFunc::Memory);
      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);

      merge_mem = MergeMemNode::make(mem);
      transform_later(merge_mem);
    }

    // Call StubRoutines::generic_arraycopy stub.
    Node* mem = generate_arraycopy(ac, NULL, &ctrl, merge_mem, &io,
                                   TypeRawPtr::BOTTOM, T_CONFLICT,
                                   src, src_offset, dest, dest_offset, length,
                                   // If a negative length guard was generated for the ArrayCopyNode,
                                   // the length of the array can never be negative.
                                   false, ac->has_negative_length_guard());

    // Do not let reads from the destination float above the arraycopy.
    // Since we cannot type the arrays, we don't know which slices
    // might be affected.  We could restrict this barrier only to those
    // memory slices which pertain to array elements--but don't bother.
    if (!InsertMemBarAfterArraycopy) {
      // (If InsertMemBarAfterArraycopy, there is already one in place.)
      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);
    }
    return;
  }

  assert(!ac->is_arraycopy_validated() || (src_elem == dest_elem && dest_elem != T_VOID), "validated but different basic types");

  // (2) src and dest arrays must have elements of the same BasicType
  // Figure out the size and type of the elements we will be copying.
  if (src_elem != dest_elem || dest_elem == T_VOID) {
    // The component types are not the same or are not recognized.  Punt.
    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
    {
      Node* mem = ac->in(TypeFunc::Memory);
      merge_mem = generate_slow_arraycopy(ac, &ctrl, mem, &io, TypePtr::BOTTOM, src, src_offset, dest, dest_offset, length, false);
    }

    _igvn.replace_node(_memproj_fallthrough, merge_mem);
    _igvn.replace_node(_ioproj_fallthrough, io);
    _igvn.replace_node(_fallthroughcatchproj, ctrl);
    return;
  }

  //---------------------------------------------------------------------------
  // We will make a fast path for this call to arraycopy.

  // We have the following tests left to perform:
  //
  // (3) src and dest must not be null.
  // (4) src_offset must not be negative.
  // (5) dest_offset must not be negative.
  // (6) length must not be negative.
  // (7) src_offset + length must not exceed length of src.
  // (8) dest_offset + length must not exceed length of dest.
  // (9) each element of an oop array must be assignable

  {
    Node* mem = ac->in(TypeFunc::Memory);
    merge_mem = MergeMemNode::make(mem);
    transform_later(merge_mem);
  }

  RegionNode* slow_region = new RegionNode(1);
  transform_later(slow_region);

  if (!ac->is_arraycopy_validated()) {
    // (3) operands must not be null
    // We currently perform our null checks with the null_check routine.
    // This means that the null exceptions will be reported in the caller
    // rather than (correctly) reported inside of the native arraycopy call.
    // This should be corrected, given time.  We do our null check with the
    // stack pointer restored.
    // The null checks are done in library_call.cpp.

    // (4) src_offset must not be negative.
    generate_negative_guard(&ctrl, src_offset, slow_region);

    // (5) dest_offset must not be negative.
    generate_negative_guard(&ctrl, dest_offset, slow_region);

    // (6) length must not be negative (moved to generate_arraycopy()).
    // generate_negative_guard(length, slow_region);

    // (7) src_offset + length must not exceed length of src.
    Node* alen = ac->in(ArrayCopyNode::SrcLen);
    assert(alen != NULL, "need src len");
    generate_limit_guard(&ctrl,
                         src_offset, length,
                         alen,
                         slow_region);

    // (8) dest_offset + length must not exceed length of dest.
    alen = ac->in(ArrayCopyNode::DestLen);
    assert(alen != NULL, "need dest len");
    generate_limit_guard(&ctrl,
                         dest_offset, length,
                         alen,
                         slow_region);

    // (9) each element of an oop array must be assignable
    // The generate_arraycopy subroutine checks this.
  }
  // This is where the memory effects are placed:
  const TypePtr* adr_type = NULL;
  if (ac->_dest_type != TypeOopPtr::BOTTOM) {
    adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
  } else {
    adr_type = TypeAryPtr::get_array_body_type(dest_elem);
  }

  generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
                     adr_type, dest_elem,
                     src, src_offset, dest, dest_offset, length,
                     // If a negative length guard was generated for the ArrayCopyNode,
                     // the length of the array can never be negative.
                     false, ac->has_negative_length_guard(), slow_region);
}