1 /* 2 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"

// ArrayCopyNode is a macro node (Flag_is_macro) that models System.arraycopy,
// Object.clone, Arrays.copyOf and Arrays.copyOfRange. It is registered with
// the compile so that macro expansion can later lower it; alternatively,
// Ideal() below may replace a small, validated copy with explicit
// loads/stores.
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

// Factory method: allocates an ArrayCopyNode and wires its inputs — the
// standard runtime-call edges (via set_predefined_input_for_runtime_call),
// the copy-specific edges (Src/SrcPos/Dest/DestPos/Length), and the
// klass/length edges used later for argument validation. If may_throw,
// I/O and safepoint debug edges are attached as well.
ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* length,
                                   bool alloc_tightly_coupled,
                                   bool has_negative_length_guard,
                                   Node* src_klass, Node* dest_klass,
                                   Node* src_length, Node* dest_length) {

  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
  // Return value (previous memory state) is not needed here.
  Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);

  ac->init_req(ArrayCopyNode::Src, src);
  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
  ac->init_req(ArrayCopyNode::Dest, dest);
  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
  ac->init_req(ArrayCopyNode::Length, length);
  ac->init_req(ArrayCopyNode::SrcLen, src_length);
  ac->init_req(ArrayCopyNode::DestLen, dest_length);
  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);

  if (may_throw) {
    ac->set_req(TypeFunc::I_O , kit->i_o());
    kit->add_safepoint_edges(ac, false);
  }

  return ac;
}

// Hooks this call's output projections (control, i_o, memory, exception
// path) into the kit's current JVM state.
void
ArrayCopyNode::connect_outputs(GraphKit* kit) {
  kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
  kit->set_all_memory_call(this);
}

#ifndef PRODUCT
// Printable names indexed by _kind (see the Kind enum in the header).
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};

void ArrayCopyNode::dump_spec(outputStream *st) const {
  CallNode::dump_spec(st);
  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

// Returns the copy length if it is a compile-time constant, -1 otherwise.
// For a basic clone the Length input is pointer-sized (word count), so it
// is read with find_intptr_t_con; all other kinds use an int constant.
intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

// Returns the number of units that would be copied element-by-element:
// the non-static field count for an instance clone, the array length for
// an array clone, or the constant Length input otherwise. -1 if unknown.
int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  if (is_clonebasic()) {
    Node* src = in(ArrayCopyNode::Src);
    const Type* src_type = phase->type(src);

    if (src_type == Type::TOP) {
      return -1;
    }

    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway so easier to simply
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert (ary_src != NULL, "not an array or instance?");
      // clone passes a length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length input to ArrayCopyNode is constant, length of input
      // array must be too.
      assert((get_length_if_constant(phase) == -1) == !ary_src->size()->is_con() ||
             (ValueArrayFlatten && ary_src->elem()->make_oopptr() != NULL && ary_src->elem()->make_oopptr()->can_be_value_type()) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally(), "inconsistent");

      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

// Attempts to replace a basic clone of an *instance* (not an array) with
// one load/store pair per non-static field. Returns NULL when not
// applicable, NodeSentinel when the transform could not be completed (see
// finish_transform), and otherwise the resulting MergeMem memory state.
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return NULL;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(src);

  // For a basic clone, Src/Dest are raw addresses (base + offset).
  assert(src->is_AddP(), "should be base + off");
  assert(dest->is_AddP(), "should be base + off");
  Node* base_src = src->in(AddPNode::Base);
  Node* base_dest = dest->in(AddPNode::Base);

  MergeMemNode* mem = MergeMemNode::make(in_mem);

  const TypeInstPtr* inst_src = src_type->isa_instptr();

  if (inst_src == NULL) {
    return NULL;
  }

  if (!inst_src->klass_is_exact()) {
    // The field list is only valid if the klass cannot change: record a
    // leaf-type dependency so the compile is invalidated by subclassing.
    ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
    assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
    phase->C->dependencies()->assert_leaf_type(ik);
  }

  ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  // Emit a load+store per field, tracking memory per field alias index.
  for (int i = 0; i < count; i++) {
    ciField* field = ik->nonstatic_field_at(i);
    int fieldidx = phase->C->alias_type(field)->index();
    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
    Node* off = phase->MakeConX(field->offset());
    Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
    Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));
    BasicType bt = field->layout_type();

    const Type *type;
    if (bt == T_OBJECT) {
      if (!field->type()->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
      } else {
        ciType* field_klass = field->type();
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    Node* v = LoadNode::make(*phase, ctl, mem->memory_at(fieldidx), next_src, adr_type, type, bt, MemNode::unordered);
    v = phase->transform(v);
    Node* s = StoreNode::make(*phase, ctl, mem->memory_at(fieldidx), next_dest, adr_type, v, bt, MemNode::unordered);
    s = phase->transform(s);
    mem->set_memory_at(fieldidx, s);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    // Return NodeSentinel to indicate that the transform failed
    return NodeSentinel;
  }

  return mem;
}

// Computes the addressing information needed to expand this node into
// loads/stores: base and element addresses for src/dest, the element
// BasicType, the element value type, and whether the bases are known to
// be disjoint. Returns false when expansion is not possible (unknown
// array types, mismatched element types, or an oop copy that would need
// GC barriers we cannot emit here).
bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                                       Node*& adr_src,
                                       Node*& base_src,
                                       Node*& adr_dest,
                                       Node*& base_dest,
                                       BasicType& copy_type,
                                       const Type*& value_type,
                                       bool& disjoint_bases) {
  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(src);
  const TypeAryPtr* ary_src = src_type->isa_aryptr();

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);

    // newly allocated object is guaranteed to not overlap with source object
    disjoint_bases = is_alloc_tightly_coupled();

    if (ary_src  == NULL || ary_src->klass()  == NULL ||
        ary_dest == NULL || ary_dest->klass() == NULL) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem  = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    BasicType dest_elem = ary_dest->klass()->as_array_klass()->element_type()->basic_type();
    // Arrays-of-arrays and object arrays of value types are copied as oops.
    if (src_elem == T_ARRAY ||
        (src_elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass())) {
      src_elem = T_OBJECT;
    }
    if (dest_elem == T_ARRAY ||
        (dest_elem == T_VALUETYPE && ary_dest->klass()->is_obj_array_klass())) {
      dest_elem = T_OBJECT;
    }

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (dest_elem == T_OBJECT && (!is_alloc_tightly_coupled() ||
                                  bs->array_copy_requires_gc_barriers(T_OBJECT))) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    value_type = ary_src->elem();

    base_src = src;
    base_dest = dest;

    uint shift  = exact_log2(type2aelembytes(dest_elem));
    if (dest_elem == T_VALUETYPE) {
      // Flattened value type array: element size comes from the klass.
      ciValueArrayKlass* vak = ary_src->klass()->as_value_array_klass();
      shift = vak->log2_element_size();
    }
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    adr_src = src;
    adr_dest = dest;

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());

    // Address = base + header + (offset << shift).
    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, dest_scale));

    copy_type = dest_elem;
  } else {
    assert(ary_src != NULL, "should be a clone");
    assert(is_clonebasic(), "should be");

    // A clone's destination is freshly allocated: bases cannot overlap.
    disjoint_bases = true;
    assert(src->is_AddP(), "should be base + off");
    assert(dest->is_AddP(), "should be base + off");
    adr_src = src;
    base_src = src->in(AddPNode::Base);
    adr_dest = dest;
    base_dest = dest->in(AddPNode::Base);

    assert(phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con() == phase->type(dest->in(AddPNode::Offset))->is_intptr_t()->get_con(), "same start offset?");

    if (ary_src->elem()->make_oopptr() != NULL &&
        ary_src->elem()->make_oopptr()->can_be_value_type()) {
      // Element might be a flattenable value type: bail out.
      return false;
    }

    BasicType elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    if (elem == T_ARRAY ||
        (elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass())) {
      elem = T_OBJECT;
    }

    // Skip from the clone's raw start offset forward to the first element.
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con();
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }

    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

// Returns n's array pointer type widened to cover any element offset
// (OffsetBot), i.e. the alias type for "some element of this array".
const TypeAryPtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) {
  const Type* at = phase->type(n);
  assert(at != Type::TOP, "unexpected type");
  const TypeAryPtr* atp = at->is_aryptr();
  // adjust atp to be the correct array element address type
  atp = atp->add_offset(Type::OffsetBot)->is_aryptr();
  return atp;
}

// If src and dest may be the same array (bases not disjoint) and more
// than one element is copied, emit an "src_offset < dest_offset" test:
// the false branch continues as the forward-copy path (kit control),
// the true branch is returned in backward_ctl for a backward copy.
void ArrayCopyNode::array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, Node*& backward_ctl) {
  Node* ctl = kit.control();
  if (!disjoint_bases && count > 1) {
    PhaseGVN& gvn = kit.gvn();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != NULL && dest_offset != NULL, "should be");
    Node* cmp = gvn.transform(new CmpINode(src_offset, dest_offset));
    Node *bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
    IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    gvn.transform(iff);

    kit.set_control(gvn.transform(new IfFalseNode(iff)));
    backward_ctl = gvn.transform(new IfTrueNode(iff));
  }
}

// Emits the load/store pair that copies element i from src to dest.
// For a flattened value type array (T_VALUETYPE) each field of the
// element is copied individually; oop stores go through the GC-barrier
// aware access_store_at when barriers are required.
void ArrayCopyNode::copy(GraphKit& kit,
                         const TypeAryPtr* atp_src,
                         const TypeAryPtr* atp_dest,
                         int i,
                         Node* base_src,
                         Node* base_dest,
                         Node* adr_src,
                         Node* adr_dest,
                         BasicType copy_type,
                         const Type* value_type) {
  if (copy_type == T_VALUETYPE) {
    ciValueArrayKlass* vak = atp_src->klass()->as_value_array_klass();
    ciValueKlass* vk = vak->element_klass()->as_value_klass();
    for (int j = 0; j < vk->nof_nonstatic_fields(); j++) {
      ciField* field = vk->nonstatic_field_at(j);
      // Offset of the field within the flattened element.
      int off_in_vt = field->offset() - vk->first_field_offset();
      Node* off = kit.MakeConX(off_in_vt + i * vak->element_byte_size());
      ciType* ft = field->type();
      BasicType bt = type2field[ft->basic_type()];
      assert(!field->is_flattened(), "flattened field encountered");
      if (bt == T_VALUETYPE) {
        // Non-flattened value type field is stored as an oop.
        bt = T_OBJECT;
      }
      const Type* rt = Type::get_const_type(ft);
      const TypePtr* adr_type = atp_src->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
      Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
      Node* v = kit.make_load(kit.control(), next_src, rt, bt, adr_type, MemNode::unordered);

      Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
      if (is_java_primitive(bt)) {
        kit.store_to_memory(kit.control(), next_dest, v, bt, adr_type, MemNode::unordered);
      } else {
        const TypeOopPtr* val_type = Type::get_const_type(ft)->is_oopptr();
        kit.access_store_at(kit.control(), base_dest, next_dest, adr_type, v,
                            val_type, bt, StoreNode::release_if_reference(T_OBJECT));
      }
    }
  } else {
    Node* off = kit.MakeConX(type2aelembytes(copy_type) * i);
    Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
    Node* v = kit.make_load(kit.control(), next_src, value_type, copy_type, atp_src, MemNode::unordered);
    Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (copy_type == T_OBJECT && (!is_alloc_tightly_coupled() || bs->array_copy_requires_gc_barriers(T_OBJECT))) {
      kit.access_store_at(kit.control(), base_dest, next_dest, atp_dest, v,
                          value_type->make_ptr()->is_oopptr(), copy_type,
                          StoreNode::release_if_reference(T_OBJECT));
    } else {
      kit.store_to_memory(kit.control(), next_dest, v, copy_type, atp_dest, MemNode::unordered);
    }
  }
}


// Emits the copy in ascending element order (used when regions are
// disjoint or src starts at/after dest). With an unknown count during
// IGVN, just records the addresses so they get cleaned up.
void ArrayCopyNode::array_copy_forward(GraphKit& kit,
                                       bool can_reshape,
                                       const TypeAryPtr* atp_src,
                                       const TypeAryPtr* atp_dest,
                                       Node* adr_src,
                                       Node* base_src,
                                       Node* adr_dest,
                                       Node* base_dest,
                                       BasicType copy_type,
                                       const Type* value_type,
                                       int count) {
  if (!kit.stopped()) {
    // copy forward
    if (count > 0) {
      for (int i = 0; i < count; i++) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if(can_reshape) {
      PhaseGVN& gvn = kit.gvn();
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

// Emits the copy in descending element order (used for the overlapping
// case where src starts before dest, so forward copying would clobber
// not-yet-read elements).
void ArrayCopyNode::array_copy_backward(GraphKit& kit,
                                        bool can_reshape,
                                        const TypeAryPtr* atp_src,
                                        const TypeAryPtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!kit.stopped()) {
    // copy backward
    PhaseGVN& gvn = kit.gvn();

    if (count > 0) {
      for (int i = count-1; i >= 0; i--) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if(can_reshape) {
      // NOTE(review): this declaration shadows the (unused-on-this-path)
      // 'gvn' above; harmless, but one of the two is redundant.
      PhaseGVN& gvn = kit.gvn();
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

// Splices the expanded copy (new control ctl and memory mem) into the
// graph in place of this node's fallthrough projections. During IGVN the
// projections are replaced directly; at parse time the transform is only
// legal if control was left untouched, otherwise returns false so the
// caller abandons the expansion.
bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      // Expect the shape MemProj -> MergeMem -> MemBar; anything else can
      // only arise when GC barriers (card marking) are involved.
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(T_OBJECT), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // replace fallthrough projections of the ArrayCopyNode by the
      // new memory, control and the input IO.
      CallProjections* callprojs = extract_projections(true, false);

      if (callprojs->fallthrough_ioproj != NULL) {
        igvn->replace_node(callprojs->fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs->fallthrough_memproj != NULL) {
        igvn->replace_node(callprojs->fallthrough_memproj, mem);
      }
      if (callprojs->fallthrough_catchproj != NULL) {
        igvn->replace_node(callprojs->fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
#ifdef ASSERT
      Node* src = in(ArrayCopyNode::Src);
      const Type* src_type = phase->type(src);
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      BasicType elem = ary_src != NULL ? ary_src->klass()->as_array_klass()->element_type()->basic_type() : T_CONFLICT;
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      assert(!is_clonebasic() || bs->array_copy_requires_gc_barriers(T_OBJECT) || (ary_src != NULL && elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass()), "added control for clone?");
#endif
      return false;
    }
  }
  return true;
}


// Ideal transformation: if this is a small (<= ArrayCopyLoadStoreMaxElem)
// copy whose arguments are validated (or a basic clone), expand it inline
// into explicit loads/stores — with an overlap test selecting between a
// forward and a backward copy when src and dest may alias. Returns the
// resulting memory state, or NULL when no transformation applies.
Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Perform any generic optimizations first
  Node* result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL) {
    return result;
  }

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return NULL;
  }

  // See if it's a small array copy and we can inline it as
  // loads/stores
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  // need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return NULL;
  }

  assert(in(TypeFunc::Control) != NULL &&
         in(TypeFunc::Memory) != NULL &&
         in(ArrayCopyNode::Src) != NULL &&
         in(ArrayCopyNode::Dest) != NULL &&
         in(ArrayCopyNode::Length) != NULL &&
         ((in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::DestPos) != NULL) ||
          is_clonebasic()), "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) {
    return NULL;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return NULL;
  }

  // Instance clone? Handled separately from the array cases below.
  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != NULL) {
    return (mem == NodeSentinel) ? NULL : mem;
  }

  Node* adr_src = NULL;
  Node* base_src = NULL;
  Node* adr_dest = NULL;
  Node* base_dest = NULL;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = NULL;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    return NULL;
  }

  // Build a fresh map/JVMState for the GraphKit used to emit the copy;
  // a basic clone cannot throw, so it only needs a minimal state.
  JVMState* new_jvms = NULL;
  SafePointNode* new_map = NULL;
  if (!is_clonebasic()) {
    new_jvms = jvms()->clone_shallow(phase->C);
    new_map = new SafePointNode(req(), new_jvms);
    for (uint i = TypeFunc::FramePtr; i < req(); i++) {
      new_map->init_req(i, in(i));
    }
    new_jvms->set_map(new_map);
  } else {
    new_jvms = new (phase->C) JVMState(0);
    new_map = new SafePointNode(TypeFunc::Parms, new_jvms);
    new_jvms->set_map(new_map);
  }
  new_map->set_control(in(TypeFunc::Control));
  new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory)));
  new_map->set_i_o(in(TypeFunc::I_O));

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const TypeAryPtr* atp_src = get_address_type(phase, src);
  const TypeAryPtr* atp_dest = get_address_type(phase, dest);
  uint alias_idx_src = phase->C->get_alias_index(atp_src);
  uint alias_idx_dest = phase->C->get_alias_index(atp_dest);

  if (can_reshape) {
    // Delay IGVN transforms while the subgraph is under construction.
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  GraphKit kit(new_jvms, phase);

  SafePointNode* backward_map = NULL;
  SafePointNode* forward_map = NULL;
  Node* backward_ctl = phase->C->top();

  array_copy_test_overlap(kit, disjoint_bases, count, backward_ctl);

  {
    PreserveJVMState pjvms(&kit);

    array_copy_forward(kit, can_reshape,
                       atp_src, atp_dest,
                       adr_src, base_src, adr_dest, base_dest,
                       copy_type, value_type, count);

    forward_map = kit.stop();
  }

  kit.set_control(backward_ctl);
  array_copy_backward(kit, can_reshape,
                      atp_src, atp_dest,
                      adr_src, base_src, adr_dest, base_dest,
                      copy_type, value_type, count);

  backward_map = kit.stop();

  // Merge the forward and backward paths (Region + memory Phi) when both
  // were emitted; otherwise adopt whichever single path is live.
  if (!forward_map->control()->is_top() && !backward_map->control()->is_top()) {
    assert(forward_map->i_o() == backward_map->i_o(), "need a phi on IO?");
    Node* ctl = new RegionNode(3);
    Node* mem = new PhiNode(ctl, Type::MEMORY, TypePtr::BOTTOM);
    kit.set_map(forward_map);
    ctl->init_req(1, kit.control());
    mem->init_req(1, kit.reset_memory());
    kit.set_map(backward_map);
    ctl->init_req(2, kit.control());
    mem->init_req(2, kit.reset_memory());
    kit.set_control(phase->transform(ctl));
    kit.set_all_memory(phase->transform(mem));
  } else if (!forward_map->control()->is_top()) {
    kit.set_map(forward_map);
  } else {
    assert(!backward_map->control()->is_top(), "no copy?");
    kit.set_map(backward_map);
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  mem = kit.map()->memory();
  if (!finish_transform(phase, can_reshape, kit.control(), mem)) {
    if (!can_reshape) {
      // Retry during IGVN, where finish_transform can rewire projections.
      phase->record_for_igvn(this);
    }
    return NULL;
  }

  return mem;
}

// Returns true if this copy may write to memory of type t_oop. Uses the
// destination type recorded by escape analysis (_dest_type) to compare
// instance ids when available; otherwise falls back to the generic
// CallNode helper.
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
    return t_oop->instance_id() == _dest_type->instance_id();
  }

  return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}

// Helper for the MemBar-based may_modify below: true if n is an
// ArrayCopy (or a call to an arraycopy stub) that may modify t_oop;
// on success the matching call is returned through 'call'.
bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call) {
  if (n != NULL &&
      n->is_Call() &&
      n->as_Call()->may_modify(t_oop, phase) &&
      (n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) {
    call = n->as_Call();
    return true;
  }
  return false;
}

// Walks backwards from membar mb (stepping over any GC barrier subgraph)
// looking for an arraycopy/stub call that may modify t_oop. On success
// returns true and sets 'ac' to the ArrayCopyNode if the call is one
// (NULL otherwise, e.g. for a plain stub call).
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase, ArrayCopyNode*& ac) {

  Node* c = mb->in(0);

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // step over g1 gc barrier if we're at e.g. a clone with ReduceInitialCardMarks off
  c = bs->step_over_gc_barrier(c);

  CallNode* call = NULL;
  guarantee(c != NULL, "step_over_gc_barrier failed, there must be something to step to.");
  if (c->is_Region()) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL) {
        Node* n = c->in(i)->in(0);
        if (may_modify_helper(t_oop, n, phase, call)) {
          ac = call->isa_ArrayCopy();
          assert(c == mb->in(0), "only for clone");
          return true;
        }
      }
    }
  } else if (may_modify_helper(t_oop, c->in(0), phase, call)) {
    ac = call->isa_ArrayCopy();
#ifdef ASSERT
    bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
      static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
    assert(c == mb->in(0) || (ac != NULL && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
#endif
    return true;
  }

  return false;
}

// Does this array copy modify offsets between offset_lo and offset_hi
// in the destination array
// if must_modify is false, return true if the copy could write
// between offset_lo and offset_hi
// if must_modify is true, return true if the copy is guaranteed to
// write between offset_lo and offset_hi
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransform* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt *len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == NULL || len_t == NULL || ary_t == NULL) {
    // Without type info we must be conservative either way.
    return !must_modify;
  }

  BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);

  // Byte bounds of the written region, computed from the int type ranges
  // of DestPos and Length (jlong arithmetic avoids overflow).
  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    // Guaranteed write: [offset_lo, offset_hi] lies inside the region
    // written for every possible dest_pos/len in range.
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    // Possible write: the ranges merely overlap.
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}