1 /* 2 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "runtime/sharedRuntime.hpp"

// ArrayCopyNode is a macro node (Flag_is_macro): it stands for an
// arraycopy/clone style copy until macro expansion, and Ideal() may
// replace it early by explicit loads/stores when the copy is small and
// its arguments have been validated.
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _kind(None),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  // Register with the compile so macro expansion will visit this node.
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

// Build an ArrayCopyNode with the standard runtime-call inputs plus the
// arraycopy-specific arguments: source/destination bases and positions,
// copy length, and the klass/length inputs used when arguments still
// require validation. If may_throw is true, an I/O edge and safepoint
// (debug) edges are added so the node can act as a throwing call.
ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* length,
                                   bool alloc_tightly_coupled,
                                   bool has_negative_length_guard,
                                   Node* src_klass, Node* dest_klass,
                                   Node* src_length, Node* dest_length) {

  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
  // Sets control/memory/frameptr inputs; the returned previous memory
  // state is not needed here.
  Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);

  ac->init_req(ArrayCopyNode::Src, src);
  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
  ac->init_req(ArrayCopyNode::Dest, dest);
  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
  ac->init_req(ArrayCopyNode::Length, length);
  ac->init_req(ArrayCopyNode::SrcLen, src_length);
  ac->init_req(ArrayCopyNode::DestLen, dest_length);
  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);

  if (may_throw) {
    ac->set_req(TypeFunc::I_O , kit->i_o());
    kit->add_safepoint_edges(ac, false);
  }

  return ac;
}

// Wire this node's output projections into the GraphKit: control and I/O
// projections, plus an exception path for Throwable via make_slow_call_ex.
void ArrayCopyNode::connect_outputs(GraphKit* kit) {
  kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
  kit->set_all_memory_call(this);
}

#ifndef PRODUCT
// Printable names indexed by _kind (must stay in sync with the Kind enum).
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};

void ArrayCopyNode::dump_spec(outputStream *st) const {
  CallNode::dump_spec(st);
  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

// Return the copy length if it is a compile-time constant, -1 otherwise.
// For a basic clone the length input is a machine-word constant; for the
// other kinds it is an int constant.
intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

// Number of elements (or, for an instance clone, non-static fields) that
// an inlined copy would have to move; -1 if unknown/not constant.
int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  Node* src = in(ArrayCopyNode::Src);
  const Type* src_type = phase->type(src);

  if (is_clonebasic()) {
    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway so easier to simply
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert (ary_src != NULL, "not an array or instance?");
      // clone passes a length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length input to ArrayCopyNode is constant, length of input
      // array must be too.
      assert((get_length_if_constant(phase) == -1) == !ary_src->size()->is_con() ||
             phase->is_IterGVN(), "inconsistent");

      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

// Try to replace a basic clone of an instance (not an array) by a
// field-by-field sequence of loads and stores. Returns:
//   NULL         - not applicable (not a basic clone, or source is not an
//                  instance pointer)
//   NodeSentinel - applicable but the transform must not happen: an oop
//                  field with UseLoadBarrier enabled, or finish_transform
//                  failed; the caller maps this to "give up"
//   a MergeMem   - the new memory state after the expanded copy
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return NULL;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(src);

  // For a basic clone, Src/Dest are base + offset addresses.
  assert(src->is_AddP(), "should be base + off");
  assert(dest->is_AddP(), "should be base + off");
  Node* base_src = src->in(AddPNode::Base);
  Node* base_dest = dest->in(AddPNode::Base);

  MergeMemNode* mem = MergeMemNode::make(in_mem);

  const TypeInstPtr* inst_src = src_type->isa_instptr();

  if (inst_src == NULL) {
    return NULL;
  }

  if (!inst_src->klass_is_exact()) {
    // get_count() already rejected interfaces and klasses with subklasses,
    // so it is safe to record a leaf-type dependency here.
    ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
    assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
    phase->C->dependencies()->assert_leaf_type(ik);
  }

  ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  // Emit one load/store pair per non-static field, threading the memory
  // state per alias class through the MergeMem.
  for (int i = 0; i < count; i++) {
    ciField* field = ik->nonstatic_field_at(i);
    BasicType bt = field->layout_type();

    const Type *type;
    if (bt == T_OBJECT) {
      if (!field->type()->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
      } else {
        ciType* field_klass = field->type();
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      }
      if (UseLoadBarrier) {
        // Oop fields would need load barriers; bail out and let the
        // partially built MergeMem be reclaimed by IGVN.
        if (can_reshape) {
          PhaseIterGVN* igvn = phase->is_IterGVN();
          igvn->_worklist.push(mem);
        }
        return NodeSentinel;
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    int fieldidx = phase->C->alias_type(field)->index();
    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
    Node* off = phase->MakeConX(field->offset());
    Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
    Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));

    Node* v = LoadNode::make(*phase, ctl, mem->memory_at(fieldidx), next_src, adr_type, type, bt, MemNode::unordered);
    v = phase->transform(v);
    Node* s = StoreNode::make(*phase, ctl, mem->memory_at(fieldidx), next_dest, adr_type, v, bt, MemNode::unordered);
    s = phase->transform(s);
    mem->set_memory_at(fieldidx, s);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(mem);
    }
    // Return NodeSentinel to indicate that the transform failed
    return NodeSentinel;
  }

  return mem;
}

// Compute the base and element addresses, element type and value type for
// an inlined array copy, filling in the reference out-parameters. Returns
// false when the copy cannot be inlined: unknown array types, mismatched
// element types, or an oop-array copy that would require card marks or
// load barriers.
bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                                       Node*& adr_src,
                                       Node*& base_src,
                                       Node*& adr_dest,
                                       Node*& base_dest,
                                       BasicType& copy_type,
                                       const Type*& value_type,
                                       bool& disjoint_bases) {
  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(src);
  const TypeAryPtr* ary_src = src_type->isa_aryptr();

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);

    // newly allocated object is guaranteed to not overlap with source object
    disjoint_bases = is_alloc_tightly_coupled();

    if (ary_src == NULL || ary_src->klass() == NULL ||
        ary_dest == NULL || ary_dest->klass() == NULL) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    BasicType dest_elem = ary_dest->klass()->as_array_klass()->element_type()->basic_type();
    // Arrays of arrays are copied as arrays of oops.
    if (src_elem == T_ARRAY) src_elem = T_OBJECT;
    if (dest_elem == T_ARRAY) dest_elem = T_OBJECT;

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    if (dest_elem == T_OBJECT && (!is_alloc_tightly_coupled() || !GraphKit::use_ReduceInitialCardMarks())) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    if (dest_elem == T_OBJECT && UseLoadBarrier) {
      // Oop copies would need load barriers; leave to the stub.
      return false;
    }

    value_type = ary_src->elem();

    base_src = src;
    base_dest = dest;

    uint shift = exact_log2(type2aelembytes(dest_elem));
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    adr_src = src;
    adr_dest = dest;

    // Convert the int positions to machine-word indices, scale by the
    // element size and add the array header to get element addresses.
    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, dest_scale));

    adr_src = new AddPNode(base_src, adr_src, phase->MakeConX(header));
    adr_dest = new AddPNode(base_dest, adr_dest, phase->MakeConX(header));

    adr_src = phase->transform(adr_src);
    adr_dest = phase->transform(adr_dest);

    copy_type = dest_elem;
  } else {
    // Basic clone of an array: Src/Dest are base + offset addresses and
    // the destination is fresh, so the bases cannot overlap.
    assert(ary_src != NULL, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;
    assert(src->is_AddP(), "should be base + off");
    assert(dest->is_AddP(), "should be base + off");
    adr_src = src;
    base_src = src->in(AddPNode::Base);
    adr_dest = dest;
    base_dest = dest->in(AddPNode::Base);

    assert(phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con() == phase->type(dest->in(AddPNode::Offset))->is_intptr_t()->get_con(), "same start offset?");
    BasicType elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    if (elem == T_ARRAY) elem = T_OBJECT;

    if (elem == T_OBJECT && UseLoadBarrier) {
      return false;
    }

    // The clone may start copying before the first element (at the
    // rounded-down header); step the addresses up to the first element.
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con();
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }

    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

// Address type for the copied elements: the pointer type of n widened to
// an unknown offset so it covers the whole element area.
const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) {
  const Type* at = phase->type(n);
  assert(at != Type::TOP, "unexpected type");
  const TypePtr* atp = at->isa_ptr();
  // adjust atp to be the correct array element address type
  atp = atp->add_offset(Type::OffsetBot);
  return atp;
}

// If the bases may overlap and more than one element is copied, emit a
// src_offset < dest_offset test. The copy runs forward on the IfFalse
// path (src_offset >= dest_offset) and backward on the IfTrue path;
// otherwise all copying happens on forward_ctl. Untaken paths stay top().
void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
  Node* ctl = in(TypeFunc::Control);
  if (!disjoint_bases && count > 1) {
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != NULL && dest_offset != NULL, "should be");
    Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
    Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
    IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    phase->transform(iff);

    forward_ctl = phase->transform(new IfFalseNode(iff));
    backward_ctl = phase->transform(new IfTrueNode(iff));
  } else {
    forward_ctl = ctl;
  }
}

// Emit count load/store pairs copying elements in ascending order under
// forward_ctl. Returns the resulting memory state, or top() if the
// forward path is dead.
Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
                                        bool can_reshape,
                                        Node* forward_ctl,
                                        Node* start_mem_src,
                                        Node* start_mem_dest,
                                        const TypePtr* atp_src,
                                        const TypePtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  // prepare_array_copy() must have rejected oop copies under UseLoadBarrier.
  guarantee(!UseLoadBarrier || copy_type != T_OBJECT, "Must be");
  Node* mem = phase->C->top();
  if (!forward_ctl->is_top()) {
    // copy forward
    mem = start_mem_dest;

    if (count > 0) {
      Node* v = LoadNode::make(*phase, forward_ctl, start_mem_src, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
      v = phase->transform(v);
      mem = StoreNode::make(*phase, forward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
      mem = phase->transform(mem);
      for (int i = 1; i < count; i++) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
        Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
        v = LoadNode::make(*phase, forward_ctl, mem, next_src, atp_src, value_type, copy_type, MemNode::unordered);
        v = phase->transform(v);
        mem = StoreNode::make(*phase, forward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
        mem = phase->transform(mem);
      }
    } else if(can_reshape) {
      // Nothing copied: let IGVN clean up the now-unused addresses.
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
  }
  return mem;
}

// Emit count load/store pairs copying elements in descending order under
// backward_ctl (used when the regions may overlap with src before dest).
// Returns the resulting memory state, or top() if the path is dead.
Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
                                         bool can_reshape,
                                         Node* backward_ctl,
                                         Node* start_mem_src,
                                         Node* start_mem_dest,
                                         const TypePtr* atp_src,
                                         const TypePtr* atp_dest,
                                         Node* adr_src,
                                         Node* base_src,
                                         Node* adr_dest,
                                         Node* base_dest,
                                         BasicType copy_type,
                                         const Type* value_type,
                                         int count) {
  guarantee(!UseLoadBarrier || copy_type != T_OBJECT, "Must be");
  Node* mem = phase->C->top();
  if (!backward_ctl->is_top()) {
    // copy backward
    mem = start_mem_dest;

    if (count > 0) {
      for (int i = count-1; i >= 1; i--) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
        Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
        Node* v = LoadNode::make(*phase, backward_ctl, mem, next_src, atp_src, value_type, copy_type, MemNode::unordered);
        v = phase->transform(v);
        mem = StoreNode::make(*phase, backward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
        mem = phase->transform(mem);
      }
      // Element 0 last.
      Node* v = LoadNode::make(*phase, backward_ctl, mem, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
      v = phase->transform(v);
      mem = StoreNode::make(*phase, backward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
      mem = phase->transform(mem);
    } else if(can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
  }
  return mem;
}

// Hook the memory/control state produced by an inlined copy into the
// graph in place of this node's projections. Only possible during IGVN
// (can_reshape); at parse time we can merely check that no new control
// was introduced. Returns false if the graph shape does not allow the
// replacement.
bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      // Expect the clone's memory projection to feed a single MergeMem
      // that in turn feeds a single MemBar; anything else (card marking)
      // cannot be rewired here.
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(!GraphKit::use_ReduceInitialCardMarks(), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // replace fallthrough projections of the ArrayCopyNode by the
      // new memory, control and the input IO.
      CallProjections callprojs;
      extract_projections(&callprojs, true, false);

      if (callprojs.fallthrough_ioproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs.fallthrough_memproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_memproj, mem);
      }
      if (callprojs.fallthrough_catchproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic(), "added control for clone?");
      return false;
    }
  }
  return true;
}


// Try to replace a small, validated arraycopy/clone by explicit loads and
// stores. Returns the new memory state on success, NULL when nothing can
// be done (the macro node stays and is expanded later).
Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;

  if (StressArrayCopyMacroNode && !can_reshape) {
    // Stress mode: postpone any transformation to IGVN.
    phase->record_for_igvn(this);
    return NULL;
  }

  // See if it's a small array copy and we can inline it as
  // loads/stores
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  // need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return NULL;
  }

  assert(in(TypeFunc::Control) != NULL &&
         in(TypeFunc::Memory) != NULL &&
         in(ArrayCopyNode::Src) != NULL &&
         in(ArrayCopyNode::Dest) != NULL &&
         in(ArrayCopyNode::Length) != NULL &&
         ((in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::DestPos) != NULL) ||
          is_clonebasic()), "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) {
    return NULL;
  }

  int count = get_count(phase);

  // Only inline constant-length copies up to the configured maximum.
  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return NULL;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != NULL) {
    // NodeSentinel means "applicable but failed": give up entirely.
    return (mem == NodeSentinel) ? NULL : mem;
  }

  Node* adr_src = NULL;
  Node* base_src = NULL;
  Node* adr_dest = NULL;
  Node* base_dest = NULL;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = NULL;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    return NULL;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const TypePtr* atp_src = get_address_type(phase, src);
  const TypePtr* atp_dest = get_address_type(phase, dest);
  uint alias_idx_src = phase->C->get_alias_index(atp_src);
  uint alias_idx_dest = phase->C->get_alias_index(atp_dest);

  Node *in_mem = in(TypeFunc::Memory);
  Node *start_mem_src = in_mem;
  Node *start_mem_dest = in_mem;
  if (in_mem->is_MergeMem()) {
    start_mem_src = in_mem->as_MergeMem()->memory_at(alias_idx_src);
    start_mem_dest = in_mem->as_MergeMem()->memory_at(alias_idx_dest);
  }


  // Delay IGVN transforms while the copy subgraph is under construction;
  // finish_transform()/the epilogue below re-enable them.
  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  Node* backward_ctl = phase->C->top();
  Node* forward_ctl = phase->C->top();
  array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);

  Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
                                         start_mem_src, start_mem_dest,
                                         atp_src, atp_dest,
                                         adr_src, base_src, adr_dest, base_dest,
                                         copy_type, value_type, count);

  Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
                                           start_mem_src, start_mem_dest,
                                           atp_src, atp_dest,
                                           adr_src, base_src, adr_dest, base_dest,
                                           copy_type, value_type, count);

  // Merge the forward and backward paths (both live only when an overlap
  // test was emitted).
  Node* ctl = NULL;
  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
    ctl = new RegionNode(3);
    mem = new PhiNode(ctl, Type::MEMORY, atp_dest);
    ctl->init_req(1, forward_ctl);
    mem->init_req(1, forward_mem);
    ctl->init_req(2, backward_ctl);
    mem->init_req(2, backward_mem);
    ctl = phase->transform(ctl);
    mem = phase->transform(mem);
  } else if (!forward_ctl->is_top()) {
    ctl = forward_ctl;
    mem = forward_mem;
  } else {
    assert(!backward_ctl->is_top(), "no copy?");
    ctl = backward_ctl;
    mem = backward_mem;
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  // Fold the new destination memory into the incoming memory state.
  MergeMemNode* out_mem = MergeMemNode::make(in_mem);
  out_mem->set_memory_at(alias_idx_dest, mem);
  mem = out_mem;

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    return NULL;
  }

  return mem;
}

// Does this copy possibly modify memory of type t_oop? Uses the src/dest
// types recorded by escape analysis (_src_type/_dest_type) when available
// to compare instance ids; otherwise falls back to the generic helper.
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
    return t_oop->instance_id() == _dest_type->instance_id();
  }

  return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}

// Helper: if n is a call that may modify t_oop and is either an
// ArrayCopy node or a call to an arraycopy stub, record it in call and
// return true.
bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call) {
  if (n != NULL &&
      n->is_Call() &&
      n->as_Call()->may_modify(t_oop, phase) &&
      (n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) {
    call = n->as_Call();
    return true;
  }
  return false;
}

// With G1 and ReduceInitialCardMarks off, a clone is followed by the G1
// post write barrier (a diamond of Regions ending in a g1_wb_post
// CallLeaf). Walk over that barrier shape from c and return the control
// above it; otherwise return c unchanged.
static Node* step_over_gc_barrier(Node* c) {
  if (UseG1GC && !GraphKit::use_ReduceInitialCardMarks() &&
      c != NULL && c->is_Region() && c->req() == 3) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL && c->in(i)->is_Region() &&
          c->in(i)->req() == 3) {
        Node* r = c->in(i);
        for (uint j = 1; j < r->req(); j++) {
          if (r->in(j) != NULL && r->in(j)->is_Proj() &&
              r->in(j)->in(0) != NULL &&
              r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post)) {
            Node* call = r->in(j)->in(0);
            // Take the other branch of the outer diamond and walk two
            // control inputs up, past the barrier.
            c = c->in(i == 1 ? 2 : 1);
            if (c != NULL) {
              c = c->in(0);
              if (c != NULL) {
                c = c->in(0);
                assert(call->in(0) == NULL ||
                       call->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
                       c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
                return c;
              }
            }
          }
        }
      }
    }
  }
  return c;
}

// Is the memory guarded by MemBar mb possibly modified by an arraycopy
// (node or stub call) feeding that membar? On success, ac is set to the
// ArrayCopyNode if there is one (NULL for a stub call).
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase, ArrayCopyNode*& ac) {

  Node* c = mb->in(0);

  // step over g1 gc barrier if we're at a clone with ReduceInitialCardMarks off
  c = step_over_gc_barrier(c);

  CallNode* call = NULL;
  if (c != NULL && c->is_Region()) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL) {
        Node* n = c->in(i)->in(0);
        if (may_modify_helper(t_oop, n, phase, call)) {
          ac = call->isa_ArrayCopy();
          assert(c == mb->in(0), "only for clone");
          return true;
        }
      }
    }
  } else if (may_modify_helper(t_oop, c->in(0), phase, call)) {
    // NOTE(review): this branch dereferences c without a NULL check;
    // appears to rely on mb->in(0) being non-NULL here - confirm.
    ac = call->isa_ArrayCopy();
    assert(c == mb->in(0) || (ac != NULL && ac->is_clonebasic() && !GraphKit::use_ReduceInitialCardMarks()), "only for clone");
    return true;
  }

  return false;
}

// Does this array copy modify offsets between offset_lo and offset_hi
// in the destination array
// if must_modify is false, return true if the copy could write
// between offset_lo and offset_hi
// if must_modify is true, return true if the copy is guaranteed to
// write between offset_lo and offset_hi
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransform* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt *len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == NULL || len_t == NULL || ary_t == NULL) {
    // Types unknown: conservatively "could modify" but not "must modify".
    return !must_modify;
  }

  BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);

  // Byte offsets of the written range, in jlong to avoid int overflow.
  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    // [offset_lo, offset_hi] must lie inside the range written for every
    // possible dest_pos/len: compare against the worst-case bounds.
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    // Ranges may intersect for some possible dest_pos/len.
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}