/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/graphKit.hpp"
#include "opto/macro.hpp"
#include "opto/runtime.hpp"


void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent) {
  MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
  mb->init_req(TypeFunc::Control, *ctrl);
  mb->init_req(TypeFunc::Memory, *mem);
  transform_later(mb);
  *ctrl = new (C, 1) ProjNode(mb, TypeFunc::Control);
  transform_later(*ctrl);
  Node* mem_proj = new (C, 1) ProjNode(mb, TypeFunc::Memory);
  transform_later(mem_proj);
  *mem = mem_proj;
}

Node* PhaseMacroExpand::array_element_address(Node* ary, Node* idx, BasicType elembt) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);
  Node* base = basic_plus_adr(ary, header);
#ifdef _LP64
  // see comment in GraphKit::array_element_address
  int index_max = max_jint - 1;  // array size is max_jint, index is one less
  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
  idx = transform_later( new (C, 2) ConvI2LNode(idx, lidxtype) );
#endif
  Node* scale = new (C, 3) LShiftXNode(idx, intcon(shift));
  transform_later(scale);
  return basic_plus_adr(ary, base, scale);
}

Node* PhaseMacroExpand::ConvI2L(Node* offset) {
  return transform_later( new (C, 2) ConvI2LNode(offset) );
}

Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem,
                                       const TypeFunc* call_type, address call_addr,
                                       const char* call_name,
                                       const TypePtr* adr_type,
                                       Node* parm0, Node* parm1,
                                       Node* parm2, Node* parm3,
                                       Node* parm4, Node* parm5,
                                       Node* parm6, Node* parm7) {
  int size = call_type->domain()->cnt();
  Node* call = new (C, size) CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O    , top());
  call->init_req(TypeFunc::Memory , mem);
  call->init_req(TypeFunc::ReturnAdr, top());
  call->init_req(TypeFunc::FramePtr, top());

  // Hook each parm in order.  Stop looking at the first NULL.
  if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
  if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
  if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
  if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
  if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
  /* close each nested if ===> */ } } } } } } } }
  assert(call->in(call->req()-1) != NULL, "must initialize all parms");

  return call;
}


//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob) {
  if ((*ctrl)->is_top()) {
    // Already short circuited.
    return NULL;
  }
  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_igvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  IfNode* iff = new (C, 2) IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN);
  transform_later(iff);

  Node* if_slow = new (C, 1) IfTrueNode(iff);
  transform_later(if_slow);

  if (region != NULL) {
    region->add_req(if_slow);
  }

  Node* if_fast = new (C, 1) IfFalseNode(iff);
  transform_later(if_fast);

  *ctrl = if_fast;

  return if_slow;
}

inline Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
  return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3));
}

void PhaseMacroExpand::generate_negative_guard(Node** ctrl, Node* index, RegionNode* region) {
  if ((*ctrl)->is_top())
    return;                // already stopped
  if (_igvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return;                // index is already adequately typed
  Node* cmp_lt = new (C, 3) CmpINode(index, intcon(0));
  transform_later(cmp_lt);
  Node* bol_lt = new (C, 2) BoolNode(cmp_lt, BoolTest::lt);
  transform_later(bol_lt);
  generate_guard(ctrl, bol_lt, region, PROB_MIN);
}

Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) {
  if ((*ctrl)->is_top()) return NULL;

  if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
    return NULL;           // index is already adequately typed
  Node* cmp_le = new (C, 3) CmpINode(index, intcon(0));
  transform_later(cmp_le);
  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
  Node* bol_le = new (C, 2) BoolNode(cmp_le, le_or_eq);
  transform_later(bol_le);
  Node* is_notp = generate_guard(ctrl, bol_le, NULL, PROB_MIN);

  return is_notp;
}

void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region) {
  bool zero_offset = _igvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset) {      // last += offset
    last = new (C, 3) AddINode(last, offset);
    transform_later(last);
  }
  Node* cmp_lt = new (C, 3) CmpUNode(array_length, last);
  transform_later(cmp_lt);
  Node* bol_lt = new (C, 2) BoolNode(cmp_lt, BoolTest::lt);
  transform_later(bol_lt);
  generate_guard(ctrl, bol_lt, region, PROB_MIN);
}

void PhaseMacroExpand::finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type) {
  transform_later(call);

  *ctrl = new (C, 1) ProjNode(call, TypeFunc::Control);
  transform_later(*ctrl);
  Node* newmem = new (C, 1) ProjNode(call, TypeFunc::Memory);
  transform_later(newmem);

  uint alias_idx = C->get_alias_index(adr_type);
  if (alias_idx != Compile::AliasIdxBot) {
    *mem = MergeMemNode::make(C, *mem);
    (*mem)->set_memory_at(alias_idx, newmem);
  } else {
    *mem = MergeMemNode::make(C, newmem);
  }
  transform_later(*mem);
}

address PhaseMacroExpand::basictype2arraycopy(BasicType t,
                                              Node* src_offset,
                                              Node* dest_offset,
                                              bool disjoint_bases,
                                              const char* &name,
                                              bool dest_uninitialized) {
  const TypeInt* src_offset_inttype  = _igvn.find_int_type(src_offset);
  const TypeInt* dest_offset_inttype = _igvn.find_int_type(dest_offset);

  bool aligned = false;
  bool disjoint = disjoint_bases;

  // if the offsets are the same, we can treat the memory regions as
  // disjoint, because either the memory regions are in different arrays,
  // or they are identical (which we can treat as disjoint.)  We can also
  // treat a copy with a destination index less than the source index
  // as disjoint since a low->high copy will work correctly in this case.
  if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
      dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
    // both indices are constants
    int s_offs = src_offset_inttype->get_con();
    int d_offs = dest_offset_inttype->get_con();
    int element_size = type2aelembytes(t);
    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
              ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
    if (s_offs >= d_offs)  disjoint = true;
  } else if (src_offset == dest_offset && src_offset != NULL) {
    // This can occur if the offsets are identical non-constants.
    disjoint = true;
  }

  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
}

#define COMMA ,
#define XTOP LP64_ONLY(COMMA top())

void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* io = ac->in(TypeFunc::I_O);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);
  MergeMemNode* merge_mem = NULL;

  if (ac->is_clonebasic()) {
    assert(src_offset == NULL && dest_offset == NULL, "for clone offsets should be null");
    Node* mem = ac->in(TypeFunc::Memory);
    const char* copyfunc_name = "arraycopy";
    address copyfunc_addr =
      basictype2arraycopy(T_LONG, NULL, NULL,
                          true, copyfunc_name, true);

    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
    const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();

    Node* call = make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, src, dest, length XTOP);
    transform_later(call);

    _igvn.replace_node(ac, call);
    return;
  } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_cloneoop()) {
    Node* mem = ac->in(TypeFunc::Memory);
    merge_mem = MergeMemNode::make(C, mem);
    transform_later(merge_mem);

    RegionNode* slow_region = new (C, 1) RegionNode(1);
    transform_later(slow_region);

    AllocateArrayNode* alloc = NULL;
    if (ac->is_alloc_tightly_coupled()) {
      alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
      assert(alloc != NULL, "expect alloc");
    }

    generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
                       TypeAryPtr::OOPS, T_OBJECT,
                       src, src_offset, dest, dest_offset, length,
                       true, !ac->is_copyofrange());

    return;
  }

  AllocateArrayNode* alloc = NULL;
  if (ac->is_alloc_tightly_coupled()) {
    alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
    assert(alloc != NULL, "expect alloc");
  }

  assert(ac->is_arraycopy() || ac->is_arraycopy_notest(), "should be an arraycopy");

  // Compile time checks.  If any of these checks cannot be verified at compile time,
  // we do not make a fast path for this call.  Instead, we let the call remain as it
  // is.  The checks we choose to mandate at compile time are:
  //
  // (1) src and dest are arrays.
  const Type* src_type = src->Value(&_igvn);
  const Type* dest_type = dest->Value(&_igvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();

  if (top_src == NULL || top_src->klass() == NULL ||
      top_dest == NULL || top_dest->klass() == NULL) {
    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the source float below the arraycopy.
    {
      Node* mem = ac->in(TypeFunc::Memory);
      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);

      merge_mem = MergeMemNode::make(C, mem);
      transform_later(merge_mem);
    }

    // Call StubRoutines::generic_arraycopy stub.
    Node* mem = generate_arraycopy(ac, NULL, &ctrl, merge_mem, &io,
                                   TypeRawPtr::BOTTOM, T_CONFLICT,
                                   src, src_offset, dest, dest_offset, length);

    // Do not let reads from the destination float above the arraycopy.
    // Since we cannot type the arrays, we don't know which slices
    // might be affected.  We could restrict this barrier only to those
    // memory slices which pertain to array elements--but don't bother.
    if (!InsertMemBarAfterArraycopy) {
      // (If InsertMemBarAfterArraycopy, there is already one in place.)
      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);
    }
    return;
  }
  // (2) src and dest arrays must have elements of the same BasicType
  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem  = top_src->klass()->as_array_klass()->element_type()->basic_type();
  BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
  if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;

  if (src_elem != dest_elem || dest_elem == T_VOID) {
    // The component types are not the same or are not recognized.  Punt.
    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
    {
      Node* mem = ac->in(TypeFunc::Memory);
      merge_mem = generate_slow_arraycopy(ac, &ctrl, mem, &io, TypePtr::BOTTOM, src, src_offset, dest, dest_offset, length, false);
    }

    _igvn.replace_node(_memproj_fallthrough, merge_mem);
    _igvn.replace_node(_ioproj_fallthrough, io);
    _igvn.replace_node(_fallthroughcatchproj, ctrl);
    return;
  }

  //---------------------------------------------------------------------------
  // We will make a fast path for this call to arraycopy.

  // We have the following tests left to perform:
  //
  // (3) src and dest must not be null.
  // (4) src_offset must not be negative.
  // (5) dest_offset must not be negative.
  // (6) length must not be negative.
  // (7) src_offset + length must not exceed length of src.
  // (8) dest_offset + length must not exceed length of dest.
  // (9) each element of an oop array must be assignable

  {
    Node* mem = ac->in(TypeFunc::Memory);
    merge_mem = MergeMemNode::make(C, mem);
    transform_later(merge_mem);
  }

  RegionNode* slow_region = new (C, 1) RegionNode(1);
  transform_later(slow_region);

  if (!ac->is_arraycopy_notest()) {
    // (3) operands must not be null
    // We currently perform our null checks with the do_null_check routine.
    // This means that the null exceptions will be reported in the caller
    // rather than (correctly) reported inside of the native arraycopy call.
    // This should be corrected, given time.  We do our null check with the
    // stack pointer restored.
    // Null checks are done in library_call.cpp.

    // (4) src_offset must not be negative.
    generate_negative_guard(&ctrl, src_offset, slow_region);

    // (5) dest_offset must not be negative.
    generate_negative_guard(&ctrl, dest_offset, slow_region);

    // (6) length must not be negative (moved to generate_arraycopy()).
    // generate_negative_guard(length, slow_region);

    // (7) src_offset + length must not exceed length of src.
    Node* r_adr = new (C, 4) AddPNode(src, src, MakeConX(arrayOopDesc::length_offset_in_bytes()));
    transform_later(r_adr);
    Node* alen = new (C, 3) LoadRangeNode(0, C->immutable_memory(), r_adr, TypeInt::POS);
    transform_later(alen);
    generate_limit_guard(&ctrl,
                         src_offset, length,
                         alen,
                         slow_region);

    // (8) dest_offset + length must not exceed length of dest.
    r_adr = new (C, 4) AddPNode(dest, dest, MakeConX(arrayOopDesc::length_offset_in_bytes()));
    transform_later(r_adr);
    alen = new (C, 3) LoadRangeNode(0, C->immutable_memory(), r_adr, TypeInt::POS);
    transform_later(alen);
    generate_limit_guard(&ctrl,
                         dest_offset, length,
                         alen,
                         slow_region);

    // (9) each element of an oop array must be assignable
    // The generate_arraycopy subroutine checks this.
  }
  // This is where the memory effects are placed:
  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
  generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
                     adr_type, dest_elem,
                     src, src_offset, dest, dest_offset, length,
                     false, false, slow_region);
}

Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* alloc,
                                           Node** ctrl, MergeMemNode* mem, Node** io,
                                           const TypePtr* adr_type,
                                           BasicType basic_elem_type,
                                           Node* src, Node* src_offset,
                                           Node* dest, Node* dest_offset,
                                           Node* copy_length,
                                           bool disjoint_bases,
                                           bool length_never_negative,
                                           RegionNode* slow_region) {
  if (slow_region == NULL) {
    slow_region = new (C, 1) RegionNode(1);
    transform_later(slow_region);
  }

  Node* original_dest = dest;
  bool dest_uninitialized = false;

  // See if this is the initialization of a newly-allocated array.
  // If so, we will take responsibility here for initializing it to zero.
  // (Note:  Because tightly_coupled_allocation performs checks on the
  // out-edges of the dest, we need to avoid making derived pointers
  // from it until we have checked its uses.)
  if (ReduceBulkZeroing
      && !ZeroTLAB                 // pointless if already zeroed
      && basic_elem_type != T_CONFLICT // avoid corner case
      && !src->eqv_uncast(dest)
      && alloc != NULL
      && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
      && alloc->maybe_set_complete(&_igvn)) {
    // "You break it, you buy it."
    InitializeNode* init = alloc->initialization();
    assert(init->is_complete(), "we just did this");
    init->set_complete_with_arraycopy();
    assert(dest->is_CheckCastPP(), "sanity");
    assert(dest->in(0)->in(0) == init, "dest pinned");
    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
    // From this point on, every exit path is responsible for
    // initializing any non-copied parts of the object to zero.
    // Also, if this flag is set we make sure that arraycopy interacts properly
    // with G1, eliding pre-barriers.  See CR 6627983.
    dest_uninitialized = true;
  } else {
    // No zeroing elimination here.
    alloc = NULL;
    //original_dest = dest;
    //dest_uninitialized = false;
  }

  uint alias_idx = C->get_alias_index(adr_type);

  // Results are placed here:
  enum { fast_path        = 1,  // normal void-returning assembly stub
         checked_path     = 2,  // special assembly stub with cleanup
         slow_call_path   = 3,  // something went wrong; call the VM
         zero_path        = 4,  // bypass when length of copy is zero
         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
         PATH_LIMIT       = 6
  };
  RegionNode* result_region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT);
  PhiNode*    result_i_o    = new (C, PATH_LIMIT) PhiNode(result_region, Type::ABIO);
  PhiNode*    result_memory = new (C, PATH_LIMIT) PhiNode(result_region, Type::MEMORY, adr_type);
  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
  transform_later(result_region);
  transform_later(result_i_o);
  transform_later(result_memory);

  // The slow_control path:
  Node* slow_control;
  Node* slow_i_o = *io;
  Node* slow_mem = mem->memory_at(alias_idx);
  debug_only(slow_control = (Node*) badAddress);

  // Checked control path:
  Node* checked_control = top();
  Node* checked_mem     = NULL;
  Node* checked_i_o     = NULL;
  Node* checked_value   = NULL;

  if (basic_elem_type == T_CONFLICT) {
    assert(!dest_uninitialized, "");
    Node* cv = generate_generic_arraycopy(ctrl, &mem,
                                          adr_type,
                                          src, src_offset, dest, dest_offset,
                                          copy_length, dest_uninitialized);
    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
    checked_control = *ctrl;
    checked_i_o     = *io;
    checked_mem     = mem->memory_at(alias_idx);
    checked_value   = cv;
    *ctrl = top();
  }

  Node* not_pos = generate_nonpositive_guard(ctrl, copy_length, length_never_negative);
  if (not_pos != NULL) {
    Node* local_ctrl = not_pos, *local_io = *io;
    MergeMemNode* local_mem = MergeMemNode::make(C, mem);
    transform_later(local_mem);

    // (6) length must not be negative.
    if (!length_never_negative) {
      generate_negative_guard(&local_ctrl, copy_length, slow_region);
    }

    // copy_length is 0.
    if (dest_uninitialized) {
      assert(!local_ctrl->is_top(), "no ctrl?");
      Node* dest_length = alloc->in(AllocateNode::ALength);
      if (copy_length->eqv_uncast(dest_length)
          || _igvn.find_int_con(dest_length, 1) <= 0) {
        // There is no zeroing to do.  No need for a secondary raw memory barrier.
      } else {
        // Clear the whole thing since there are no source elements to copy.
        generate_clear_array(local_ctrl, local_mem,
                             adr_type, dest, basic_elem_type,
                             intcon(0), NULL,
                             alloc->in(AllocateNode::AllocSize));
        // Use a secondary InitializeNode as raw memory barrier.
        // Currently it is needed only on this path since other
        // paths have stub or runtime calls as raw memory barriers.
        MemBarNode* mb = MemBarNode::make(C, Op_Initialize,
                                          Compile::AliasIdxRaw,
                                          top());
        transform_later(mb);
        mb->set_req(TypeFunc::Control, local_ctrl);
        mb->set_req(TypeFunc::Memory, local_mem->memory_at(Compile::AliasIdxRaw));
        local_ctrl = transform_later(new (C, 1) ProjNode(mb, TypeFunc::Control));
        local_mem->set_memory_at(Compile::AliasIdxRaw, transform_later(new (C, 1) ProjNode(mb, TypeFunc::Memory)));

        InitializeNode* init = mb->as_Initialize();
        init->set_complete(&_igvn);  // (there is no corresponding AllocateNode)
      }
    }

    // Present the results of the fast call.
    result_region->init_req(zero_path, local_ctrl);
    result_i_o   ->init_req(zero_path, local_io);
    result_memory->init_req(zero_path, local_mem->memory_at(alias_idx));
  }

  if (!(*ctrl)->is_top() && dest_uninitialized) {
    // We have to initialize the *uncopied* part of the array to zero.
    // The copy destination is the slice dest[off..off+len].  The other slices
    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
    Node* dest_length = alloc->in(AllocateNode::ALength);
    Node* dest_tail   = transform_later( new (C, 3) AddINode(dest_offset,
                                                             copy_length) );

    // If there is a head section that needs zeroing, do it now.
    if (_igvn.find_int_con(dest_offset, -1) != 0) {
      generate_clear_array(*ctrl, mem,
                           adr_type, dest, basic_elem_type,
                           intcon(0), dest_offset,
                           NULL);
    }

    // Next, perform a dynamic check on the tail length.
    // It is often zero, and we can win big if we prove this.
    // There are two wins:  Avoid generating the ClearArray
    // with its attendant messy index arithmetic, and upgrade
    // the copy to a more hardware-friendly word size of 64 bits.
    Node* tail_ctl = NULL;
    if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) {
      Node* cmp_lt = transform_later( new (C, 3) CmpINode(dest_tail, dest_length) );
      Node* bol_lt = transform_later( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
      tail_ctl = generate_slow_guard(ctrl, bol_lt, NULL);
      assert(tail_ctl != NULL || !(*ctrl)->is_top(), "must be an outcome");
    }

    // At this point, let's assume there is no tail.
    if (!(*ctrl)->is_top() && alloc != NULL && basic_elem_type != T_OBJECT) {
      // There is no tail.  Try an upgrade to a 64-bit copy.
      bool didit = false;
      {
        Node* local_ctrl = *ctrl, *local_io = *io;
        MergeMemNode* local_mem = MergeMemNode::make(C, mem);
        transform_later(local_mem);

        didit = generate_block_arraycopy(&local_ctrl, &local_mem, local_io,
                                         adr_type, basic_elem_type, alloc,
                                         src, src_offset, dest, dest_offset,
                                         dest_size, dest_uninitialized);
        if (didit) {
          // Present the results of the block-copying fast call.
          result_region->init_req(bcopy_path, local_ctrl);
          result_i_o   ->init_req(bcopy_path, local_io);
          result_memory->init_req(bcopy_path, local_mem->memory_at(alias_idx));
        }
      }
      if (didit) {
        *ctrl = top();     // no regular fast path
      }
    }

    // Clear the tail, if any.
    if (tail_ctl != NULL) {
      Node* notail_ctl = (*ctrl)->is_top() ? NULL : *ctrl;
      *ctrl = tail_ctl;
      if (notail_ctl == NULL) {
        generate_clear_array(*ctrl, mem,
                             adr_type, dest, basic_elem_type,
                             dest_tail, NULL,
                             dest_size);
      } else {
        // Make a local merge.
        Node* done_ctl = transform_later(new (C, 3) RegionNode(3));
        Node* done_mem = transform_later(new (C, 3) PhiNode(done_ctl, Type::MEMORY, adr_type));
        done_ctl->init_req(1, notail_ctl);
        done_mem->init_req(1, mem->memory_at(alias_idx));
        generate_clear_array(*ctrl, mem,
                             adr_type, dest, basic_elem_type,
                             dest_tail, NULL,
                             dest_size);
        done_ctl->init_req(2, *ctrl);
        done_mem->init_req(2, mem->memory_at(alias_idx));
        *ctrl = done_ctl;
        mem->set_memory_at(alias_idx, done_mem);
      }
    }
  }

  BasicType copy_type = basic_elem_type;
  assert(basic_elem_type != T_ARRAY, "caller must fix this");
  if (!(*ctrl)->is_top() && copy_type == T_OBJECT) {
    // If src and dest have compatible element types, we can copy bits.
    // Types S[] and D[] are compatible if D is a supertype of S.
    //
    // If they are not, we will use checked_oop_disjoint_arraycopy,
    // which performs a fast optimistic per-oop check, and backs off
    // further to JVM_ArrayCopy on the first per-oop check that fails.
    // (Actually, we don't move raw bits only; the GC requires card marks.)

    // Get the klass* for both src and dest
    Node* k_adr = new (C, 4) AddPNode(src, src, MakeConX(oopDesc::klass_offset_in_bytes()));
    transform_later(k_adr);
    Node* src_klass = LoadKlassNode::make(_igvn, C->immutable_memory(), k_adr, TypeInstPtr::KLASS);
    transform_later(src_klass);
    k_adr = new (C, 4) AddPNode(dest, dest, MakeConX(oopDesc::klass_offset_in_bytes()));
    transform_later(k_adr);
    Node* dest_klass = LoadKlassNode::make(_igvn, C->immutable_memory(), k_adr, TypeInstPtr::KLASS);
    transform_later(dest_klass);

    // Generate the subtype check.
    // This might fold up statically, or then again it might not.
    //
    // Non-static example:  Copying List<String>.elements to a new String[].
    // The backing store for a List<String> is always an Object[],
    // but its elements are always type String, if the generic types
    // are correct at the source level.
    //
    // Test S[] against D[], not S against D, because (probably)
    // the secondary supertype cache is less busy for S[] than S.
    // This usually only matters when D is an interface.
    Node* not_subtype_ctrl = ac->is_arraycopy_notest() ? top() : GraphKit::gen_subtype_check_any_phase(src_klass, dest_klass, ctrl, mem, &_igvn);
    // Plug failing path into checked_oop_disjoint_arraycopy
    if (not_subtype_ctrl != top()) {
      Node* local_ctrl = *ctrl;
      MergeMemNode* local_mem = MergeMemNode::make(C, mem);
      transform_later(local_mem);

      local_ctrl = not_subtype_ctrl;
      // (At this point we can assume disjoint_bases, since types differ.)
      int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
      Node* p1 = basic_plus_adr(dest_klass, ek_offset);
      Node* n1 = LoadKlassNode::make(_igvn, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
      Node* dest_elem_klass = transform_later(n1);
      Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
                                              adr_type,
                                              dest_elem_klass,
                                              src, src_offset, dest, dest_offset,
                                              ConvI2X(copy_length), dest_uninitialized);
      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
      checked_control = local_ctrl;
      checked_i_o     = *io;
      checked_mem     = local_mem->memory_at(alias_idx);
      checked_value   = cv;
    }
    // At this point we know we do not need type checks on oop stores.

    // Let's see if we need card marks:
    if (alloc != NULL && GraphKit::use_ReduceInitialCardMarks()) {
      // If we do not need card marks, copy using the jint or jlong stub.
      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
             "sizes agree");
    }
  }

  if (!(*ctrl)->is_top()) {
    // Generate the fast path, if possible.
    Node* local_ctrl = *ctrl;
    MergeMemNode* local_mem = MergeMemNode::make(C, mem);
    transform_later(local_mem);

    generate_unchecked_arraycopy(&local_ctrl, &local_mem,
                                 adr_type, copy_type, disjoint_bases,
                                 src, src_offset, dest, dest_offset,
                                 ConvI2X(copy_length), dest_uninitialized);

    // Present the results of the fast call.
    result_region->init_req(fast_path, local_ctrl);
    result_i_o   ->init_req(fast_path, *io);
    result_memory->init_req(fast_path, local_mem->memory_at(alias_idx));
  }


  slow_control = top();
  if (slow_region != NULL)
    slow_control = slow_region;   // XXX
  debug_only(slow_region = (RegionNode*)badAddress);

  *ctrl = checked_control;
  if (!(*ctrl)->is_top()) {
    // Clean up after the checked call.
    // The returned value is either 0 or -1^K,
    // where K = number of partially transferred array elements.
    Node* cmp = new (C, 3) CmpINode(checked_value, intcon(0));
    transform_later(cmp);
    Node* bol = new (C, 2) BoolNode(cmp, BoolTest::eq);
    transform_later(bol);
    IfNode* iff = new (C, 2) IfNode(*ctrl, bol, PROB_MAX, COUNT_UNKNOWN);
    transform_later(iff);

    // If it is 0, we are done, so transfer to the end.
    Node* checks_done = new (C, 1) IfTrueNode(iff);
    transform_later(checks_done);
    result_region->init_req(checked_path, checks_done);
    result_i_o   ->init_req(checked_path, checked_i_o);
    result_memory->init_req(checked_path, checked_mem);

    // If it is not zero, merge into the slow call.
    *ctrl = new (C, 1) IfFalseNode(iff);
    transform_later(*ctrl);
    RegionNode* slow_reg2 = new (C, 3) RegionNode(3);
    PhiNode*    slow_i_o2 = new (C, 3) PhiNode(slow_reg2, Type::ABIO);
    PhiNode*    slow_mem2 = new (C, 3) PhiNode(slow_reg2, Type::MEMORY, adr_type);
    transform_later(slow_reg2);
    transform_later(slow_i_o2);
    transform_later(slow_mem2);
    slow_reg2  ->init_req(1, slow_control);
    slow_i_o2  ->init_req(1, slow_i_o);
    slow_mem2  ->init_req(1, slow_mem);
    slow_reg2  ->init_req(2, *ctrl);
    slow_i_o2  ->init_req(2, checked_i_o);
    slow_mem2  ->init_req(2, checked_mem);

    slow_control = slow_reg2;
    slow_i_o     = slow_i_o2;
    slow_mem     = slow_mem2;

    if (alloc != NULL) {
      // We'll restart from the very beginning, after zeroing the whole thing.
      // This can cause double writes, but that's OK since dest is brand new.
      // So we ignore the low 31 bits of the value returned from the stub.
    } else {
      // We must continue the copy exactly where it failed, or else
      // another thread might see the wrong number of writes to dest.
      Node* checked_offset = new (C, 3) XorINode(checked_value, intcon(-1));
      Node* slow_offset    = new (C, 3) PhiNode(slow_reg2, TypeInt::INT);
      transform_later(checked_offset);
      transform_later(slow_offset);
      slow_offset->init_req(1, intcon(0));
      slow_offset->init_req(2, checked_offset);

      // Adjust the arguments by the conditionally incoming offset.
      Node* src_off_plus  = new (C, 3) AddINode(src_offset, slow_offset);
      transform_later(src_off_plus);
      Node* dest_off_plus = new (C, 3) AddINode(dest_offset, slow_offset);
      transform_later(dest_off_plus);
      Node* length_minus  = new (C, 3) SubINode(copy_length, slow_offset);
      transform_later(length_minus);

      // Tweak the node variables to adjust the code produced below:
      src_offset  = src_off_plus;
      dest_offset = dest_off_plus;
      copy_length = length_minus;
    }
  }
  *ctrl = slow_control;
  if (!(*ctrl)->is_top()) {
    Node* local_ctrl = *ctrl, *local_io = slow_i_o;
    MergeMemNode* local_mem = MergeMemNode::make(C, mem);
    transform_later(local_mem);

    // Generate the slow path, if needed.
    local_mem->set_memory_at(alias_idx, slow_mem);

    if (dest_uninitialized) {
      generate_clear_array(local_ctrl, local_mem,
                           adr_type, dest, basic_elem_type,
                           intcon(0), NULL,
                           alloc->in(AllocateNode::AllocSize));
    }

    local_mem = generate_slow_arraycopy(ac,
                                        &local_ctrl, local_mem, &local_io,
                                        adr_type,
                                        src, src_offset, dest, dest_offset,
                                        copy_length, /*dest_uninitialized*/false);

    result_region->init_req(slow_call_path, local_ctrl);
    result_i_o   ->init_req(slow_call_path, local_io);
    result_memory->init_req(slow_call_path, local_mem->memory_at(alias_idx));
  } else {
    ShouldNotReachHere(); // no call to generate_slow_arraycopy:
                          // projections were not extracted
  }

  // Remove unused edges.
  for (uint i = 1; i < result_region->req(); i++) {
    if (result_region->in(i) == NULL)
      result_region->init_req(i, top());
  }

  // Finished; return the combined state.
  *ctrl = result_region;
  *io = result_i_o;
  mem->set_memory_at(alias_idx, result_memory);

  // mem no longer guaranteed to stay a MergeMemNode
  Node* out_mem = mem;
  DEBUG_ONLY(mem = NULL);

  // The memory edges above are precise in order to model effects around
  // array copies accurately to allow value numbering of field loads around
  // arraycopy.  Such field loads, both before and after, are common in Java
  // collections and similar classes involving header/array data structures.
  //
  // But with a low number of registers, or when some registers are used or
  // killed by arraycopy calls, this causes register spilling on the stack.
  // See 6544710.  The next memory barrier is added to avoid it.  If the
  // arraycopy can be optimized away (which it can, sometimes) then we can
  // manually remove the membar also.
  //
  // Do not let reads from the cloned object float above the arraycopy.
  if (alloc != NULL && !alloc->initialization()->does_not_escape()) {
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore);
  } else if (InsertMemBarAfterArraycopy) {
    insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder);
  }

  _igvn.replace_node(_memproj_fallthrough, out_mem);
  _igvn.replace_node(_ioproj_fallthrough, *io);
  _igvn.replace_node(_fallthroughcatchproj, *ctrl);

  return out_mem;
}

// Helper for initialization of arrays, creating a ClearArray.
// It writes zero bits in [start..end), within the body of an array object.
// The memory effects are all chained onto the 'adr_type' alias category.
//
// Since the object is otherwise uninitialized, we are free
// to put a little "slop" around the edges of the cleared area,
// as long as it does not go back into the array's header,
// or beyond the array end within the heap.
//
// The lower edge can be rounded down to the nearest jint and the
// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
//
// Arguments:
//   adr_type           memory slice where writes are generated
//   dest               oop of the destination array
//   basic_elem_type    element type of the destination
//   slice_idx          array index of first element to store
//   slice_len          number of elements to store (or NULL)
//   dest_size          total size in bytes of the array object
//
// Exactly one of slice_len or dest_size must be non-NULL.
// If dest_size is non-NULL, zeroing extends to the end of the object.
// If slice_len is non-NULL, the slice_idx value must be a constant.
void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
                                            const TypePtr* adr_type,
                                            Node* dest,
                                            BasicType basic_elem_type,
                                            Node* slice_idx,
                                            Node* slice_len,
                                            Node* dest_size) {
  // one or the other but not both of slice_len and dest_size:
  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
  if (slice_len == NULL)  slice_len = top();
  if (dest_size == NULL)  dest_size = top();

  uint alias_idx = C->get_alias_index(adr_type);

  // operate on this memory slice:
  Node* mem = merge_mem->memory_at(alias_idx); // memory slice to operate on

  // scaling and rounding of indexes:
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
  int clear_low = (-1 << scale) & (BytesPerInt  - 1);
  int bump_bit  = (-1 << scale) & BytesPerInt;

  // determine constant starts and ends
  const intptr_t BIG_NEG = -128;
  assert(BIG_NEG + 2*abase < 0, "neg enough");
  intptr_t slice_idx_con = (intptr_t) _igvn.find_int_con(slice_idx, BIG_NEG);
  intptr_t slice_len_con = (intptr_t) _igvn.find_int_con(slice_len, BIG_NEG);
  if (slice_len_con == 0) {
    return;                     // nothing to do here
  }
  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
  intptr_t end_con   = _igvn.find_intptr_t_con(dest_size, -1);
  if (slice_idx_con >= 0 && slice_len_con >= 0) {
    assert(end_con < 0, "not two cons");
    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
                       BytesPerLong);
  }

  if (start_con >= 0 && end_con >= 0) {
    // Constant start and end.  Simple.
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start_con, end_con, &_igvn);
  } else if (start_con >= 0 && dest_size != top()) {
    // Constant start, pre-rounded end after the tail of the array.
    Node* end = dest_size;
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start_con, end, &_igvn);
  } else if (start_con >= 0 && slice_len != top()) {
    // Constant start, non-constant end.  End needs rounding up.
    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
    intptr_t end_base  = abase + (slice_idx_con << scale);
    int      end_round = (-1 << scale) & (BytesPerLong  - 1);
    Node*    end       = ConvI2X(slice_len);
    if (scale != 0)
      end = transform_later( new (C, 3) LShiftXNode(end, intcon(scale)) );
    end_base += end_round;
    end = transform_later( new (C, 3) AddXNode(end, MakeConX(end_base)) );
    end = transform_later( new (C, 3) AndXNode(end, MakeConX(~end_round)) );
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start_con, end, &_igvn);
  } else if (start_con < 0 && dest_size != top()) {
    // Non-constant start, pre-rounded end after the tail of the array.
    // This is almost certainly a "round-to-end" operation.
    Node* start = slice_idx;
    start = ConvI2X(start);
    if (scale != 0)
      start = transform_later( new (C, 3) LShiftXNode(start, intcon(scale)) );
    start = transform_later( new (C, 3) AddXNode(start, MakeConX(abase)) );
    if ((bump_bit | clear_low) != 0) {
      int to_clear = (bump_bit | clear_low);
      // Align up mod 8, then store a jint zero unconditionally
      // just before the mod-8 boundary.
      if (((abase + bump_bit) & ~to_clear) - bump_bit
          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
        bump_bit = 0;
        assert((abase & to_clear) == 0, "array base must be long-aligned");
      } else {
        // Bump 'start' up to (or past) the next jint boundary:
        start = transform_later( new (C, 3) AddXNode(start, MakeConX(bump_bit)) );
        assert((abase & clear_low) == 0, "array base must be int-aligned");
      }
      // Round bumped 'start' down to jlong boundary in body of array.
      start = transform_later( new (C, 3) AndXNode(start, MakeConX(~to_clear)) );
      if (bump_bit != 0) {
        // Store a zero to the immediately preceding jint:
        Node* x1 = transform_later( new (C, 3) AddXNode(start, MakeConX(-bump_bit)) );
        Node* p1 = basic_plus_adr(dest, x1);
        mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT);
        mem = transform_later(mem);
      }
    }
    Node* end = dest_size; // pre-rounded
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start, end, &_igvn);
  } else {
    // Non-constant start, unrounded non-constant end.
    // (Nobody zeroes a random midsection of an array using this routine.)
    ShouldNotReachHere();       // fix caller
  }

  // Done.
  merge_mem->set_memory_at(alias_idx, mem);
}

bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
                                                const TypePtr* adr_type,
                                                BasicType basic_elem_type,
                                                AllocateNode* alloc,
                                                Node* src, Node* src_offset,
                                                Node* dest, Node* dest_offset,
                                                Node* dest_size, bool dest_uninitialized) {
  // See if there is an advantage from block transfer.
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  if (scale >= LogBytesPerLong)
    return false;               // it is already a block transfer

  // Look at the alignment of the starting offsets.
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);

  intptr_t src_off_con  = (intptr_t) _igvn.find_int_con(src_offset, -1);
  intptr_t dest_off_con = (intptr_t) _igvn.find_int_con(dest_offset, -1);
  if (src_off_con < 0 || dest_off_con < 0)
    // At present, we can only understand constants.
    return false;

  intptr_t src_off  = abase + (src_off_con  << scale);
  intptr_t dest_off = abase + (dest_off_con << scale);

  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
    // Non-aligned; too bad.
    // One more chance:  Pick off an initial 32-bit word.
    // This is a common case, since abase can be odd mod 8.
    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
      Node* sptr = basic_plus_adr(src, src_off);
      Node* dptr = basic_plus_adr(dest, dest_off);
      uint alias_idx = C->get_alias_index(adr_type);
      Node* sval = transform_later(LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(alias_idx), sptr, adr_type, TypeInt::INT, T_INT));
      Node* st = transform_later(StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(alias_idx), dptr, adr_type, sval, T_INT));
      (*mem)->set_memory_at(alias_idx, st);
      src_off += BytesPerInt;
      dest_off += BytesPerInt;
    } else {
      return false;
    }
  }
  assert(src_off % BytesPerLong == 0, "");
  assert(dest_off % BytesPerLong == 0, "");

  // Do this copy by giant steps.
  Node* sptr = basic_plus_adr(src, src_off);
  Node* dptr = basic_plus_adr(dest, dest_off);
  Node* countx = dest_size;
  countx = transform_later( new (C, 3) SubXNode(countx, MakeConX(dest_off)) );
  countx = transform_later( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong)) );

  bool disjoint_bases = true;   // since alloc != NULL
  generate_unchecked_arraycopy(ctrl, mem,
                               adr_type, T_LONG, disjoint_bases,
                               sptr, NULL, dptr, NULL, countx, dest_uninitialized);

  return true;
}

// Helper function; generates code for the slow case.
// We make a call to a runtime method which emulates the native method,
// but without the native wrapper overhead.
MergeMemNode* PhaseMacroExpand::generate_slow_arraycopy(ArrayCopyNode *ac,
                                                        Node** ctrl, Node* mem, Node** io,
                                                        const TypePtr* adr_type,
                                                        Node* src, Node* src_offset,
                                                        Node* dest, Node* dest_offset,
                                                        Node* copy_length, bool dest_uninitialized) {
  assert(!dest_uninitialized, "Invariant");

  const TypeFunc* call_type = OptoRuntime::slow_arraycopy_Type();
  CallNode* call = new (C, call_type->domain()->cnt()) CallStaticJavaNode(call_type, OptoRuntime::slow_arraycopy_Java(),
                                                                          "slow_arraycopy",
                                                                          ac->jvms()->bci(), TypePtr::BOTTOM);

  call->init_req(TypeFunc::Control, *ctrl);
  call->init_req(TypeFunc::I_O    , *io);
  call->init_req(TypeFunc::Memory , mem);
  call->init_req(TypeFunc::ReturnAdr, top());
  call->init_req(TypeFunc::FramePtr, top());
  call->init_req(TypeFunc::Parms+0, src);
  call->init_req(TypeFunc::Parms+1, src_offset);
  call->init_req(TypeFunc::Parms+2, dest);
  call->init_req(TypeFunc::Parms+3, dest_offset);
  call->init_req(TypeFunc::Parms+4, copy_length);
  copy_call_debug_info(ac, call);

  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.replace_node(ac, call);
  transform_later(call);

  extract_call_projections(call);
  *ctrl = _fallthroughcatchproj->clone();
  transform_later(*ctrl);

  Node* m = _memproj_fallthrough->clone();
  transform_later(m);

  uint alias_idx = C->get_alias_index(adr_type);
  MergeMemNode* out_mem;
  if (alias_idx != Compile::AliasIdxBot) {
    out_mem = MergeMemNode::make(C, mem);
    out_mem->set_memory_at(alias_idx, m);
  } else {
    out_mem = MergeMemNode::make(C, m);
  }
  transform_later(out_mem);

  *io = _ioproj_fallthrough->clone();
  transform_later(*io);

  return out_mem;
}

// Helper function; generates code for cases requiring runtime checks.
Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                     const TypePtr* adr_type,
                                                     Node* dest_elem_klass,
                                                     Node* src, Node* src_offset,
                                                     Node* dest, Node* dest_offset,
                                                     Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top())  return NULL;

  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
    return NULL;
  }

  // Pick out the parameters required to perform a store-check
  // for the target array.  This is an optimistic check.  It will
  // look in each non-null element's class, at the desired klass's
  // super_check_offset, for the desired klass.
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
  Node* n3 = new (C, 3) LoadINode(NULL, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr());
  Node* check_offset = ConvI2X(transform_later(n3));
  Node* check_value  = dest_elem_klass;

  Node* src_start  = array_element_address(src, src_offset, T_OBJECT);
  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);

  const TypeFunc* call_type = OptoRuntime::checkcast_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "checkcast_arraycopy", adr_type, src_start, dest_start, copy_length XTOP, check_offset XTOP, check_value);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  Node* proj = new (C, 1) ProjNode(call, TypeFunc::Parms);
  transform_later(proj);

  return proj;
}

// Helper function; generates code for cases requiring runtime checks.
Node* PhaseMacroExpand::generate_generic_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                   const TypePtr* adr_type,
                                                   Node* src, Node* src_offset,
                                                   Node* dest, Node* dest_offset,
                                                   Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top()) return NULL;
  assert(!dest_uninitialized, "Invariant");

  address copyfunc_addr = StubRoutines::generic_arraycopy();
  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
    return NULL;
  }

  const TypeFunc* call_type = OptoRuntime::generic_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "generic_arraycopy", adr_type, src, src_offset, dest, dest_offset, copy_length);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  Node* proj = new (C, 1) ProjNode(call, TypeFunc::Parms);
  transform_later(proj);

  return proj;
}

// Helper function; generates the fast out-of-line call to an arraycopy stub.
void PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                    const TypePtr* adr_type,
                                                    BasicType basic_elem_type,
                                                    bool disjoint_bases,
                                                    Node* src, Node* src_offset,
                                                    Node* dest, Node* dest_offset,
                                                    Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top()) return;

  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != NULL || dest_offset != NULL) {
    src_start  = array_element_address(src, src_offset, basic_elem_type);
    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
  }

  // Figure out which arraycopy runtime method to call.
  const char* copyfunc_name = "arraycopy";
  address copyfunc_addr =
    basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
                        disjoint_bases, copyfunc_name, dest_uninitialized);

  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, copyfunc_name, adr_type, src_start, dest_start, copy_length XTOP);

  finish_arraycopy_call(call, ctrl, mem, adr_type);
}