/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
21 * 22 */ 23 24 #include "precompiled.hpp" 25 26 #include "gc/shenandoah/c2/shenandoahSupport.hpp" 27 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" 28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" 29 #include "gc/shenandoah/shenandoahBrooksPointer.hpp" 30 #include "gc/shenandoah/shenandoahHeap.hpp" 31 #include "gc/shenandoah/shenandoahHeapRegion.hpp" 32 #include "gc/shenandoah/shenandoahRuntime.hpp" 33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp" 34 #include "opto/arraycopynode.hpp" 35 #include "opto/block.hpp" 36 #include "opto/callnode.hpp" 37 #include "opto/castnode.hpp" 38 #include "opto/movenode.hpp" 39 #include "opto/phaseX.hpp" 40 #include "opto/rootnode.hpp" 41 #include "opto/runtime.hpp" 42 #include "opto/subnode.hpp" 43 44 Node* ShenandoahBarrierNode::skip_through_barrier(Node* n) { 45 if (!UseShenandoahGC) { 46 return n; 47 } 48 if (n == NULL) { 49 return NULL; 50 } 51 if (n->Opcode() == Op_ShenandoahEnqueueBarrier) { 52 n = n->in(1); 53 } 54 55 if (n->is_ShenandoahBarrier()) { 56 return n->in(ValueIn); 57 } else if (n->is_Phi() && 58 n->req() == 3 && 59 n->in(1) != NULL && 60 n->in(1)->is_ShenandoahBarrier() && 61 n->in(2) != NULL && 62 n->in(2)->bottom_type() == TypePtr::NULL_PTR && 63 n->in(0) != NULL && 64 n->in(0)->in(1) != NULL && 65 n->in(0)->in(1)->is_IfProj() && 66 n->in(0)->in(2) != NULL && 67 n->in(0)->in(2)->is_IfProj() && 68 n->in(0)->in(1)->in(0) != NULL && 69 n->in(0)->in(1)->in(0) == n->in(0)->in(2)->in(0) && 70 n->in(1)->in(ValueIn)->Opcode() == Op_CastPP) { 71 Node* iff = n->in(0)->in(1)->in(0); 72 Node* res = n->in(1)->in(ValueIn)->in(1); 73 if (iff->is_If() && 74 iff->in(1) != NULL && 75 iff->in(1)->is_Bool() && 76 iff->in(1)->as_Bool()->_test._test == BoolTest::ne && 77 iff->in(1)->in(1) != NULL && 78 iff->in(1)->in(1)->Opcode() == Op_CmpP && 79 iff->in(1)->in(1)->in(1) != NULL && 80 iff->in(1)->in(1)->in(1) == res && 81 iff->in(1)->in(1)->in(2) != NULL && 82 iff->in(1)->in(1)->in(2)->bottom_type() == 
TypePtr::NULL_PTR) { 83 return res; 84 } 85 } 86 return n; 87 } 88 89 bool ShenandoahBarrierNode::needs_barrier(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace) { 90 Unique_Node_List visited; 91 return needs_barrier_impl(phase, orig, n, rb_mem, allow_fromspace, visited); 92 } 93 94 bool ShenandoahBarrierNode::needs_barrier_impl(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace, Unique_Node_List &visited) { 95 if (visited.member(n)) { 96 return false; // Been there. 97 } 98 visited.push(n); 99 100 if (n->is_Allocate()) { 101 return false; 102 } 103 104 if (n->is_Call()) { 105 return true; 106 } 107 108 const Type* type = phase->type(n); 109 if (type == Type::TOP) { 110 return false; 111 } 112 if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) { 113 return false; 114 } 115 if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) { 116 return false; 117 } 118 119 if (ShenandoahOptimizeStableFinals) { 120 const TypeAryPtr* ary = type->isa_aryptr(); 121 if (ary && ary->is_stable() && allow_fromspace) { 122 return false; 123 } 124 } 125 126 if (n->is_CheckCastPP() || n->is_ConstraintCast() || n->Opcode() == Op_ShenandoahEnqueueBarrier) { 127 return needs_barrier_impl(phase, orig, n->in(1), rb_mem, allow_fromspace, visited); 128 } 129 if (n->is_Parm()) { 130 return true; 131 } 132 if (n->is_Proj()) { 133 return needs_barrier_impl(phase, orig, n->in(0), rb_mem, allow_fromspace, visited); 134 } 135 136 if (n->Opcode() == Op_ShenandoahWBMemProj) { 137 return needs_barrier_impl(phase, orig, n->in(ShenandoahWBMemProjNode::WriteBarrier), rb_mem, allow_fromspace, visited); 138 } 139 if (n->is_Phi()) { 140 bool need_barrier = false; 141 for (uint i = 1; i < n->req() && ! need_barrier; i++) { 142 Node* input = n->in(i); 143 if (input == NULL) { 144 need_barrier = true; // Phi not complete yet? 
145 } else if (needs_barrier_impl(phase, orig, input, rb_mem, allow_fromspace, visited)) { 146 need_barrier = true; 147 } 148 } 149 return need_barrier; 150 } 151 if (n->is_CMove()) { 152 return needs_barrier_impl(phase, orig, n->in(CMoveNode::IfFalse), rb_mem, allow_fromspace, visited) || 153 needs_barrier_impl(phase, orig, n->in(CMoveNode::IfTrue ), rb_mem, allow_fromspace, visited); 154 } 155 if (n->Opcode() == Op_CreateEx) { 156 return true; 157 } 158 if (n->Opcode() == Op_ShenandoahWriteBarrier) { 159 return false; 160 } 161 if (n->Opcode() == Op_ShenandoahReadBarrier) { 162 if (rb_mem == n->in(Memory)) { 163 return false; 164 } else { 165 return true; 166 } 167 } 168 169 if (n->Opcode() == Op_LoadP || 170 n->Opcode() == Op_LoadN || 171 n->Opcode() == Op_GetAndSetP || 172 n->Opcode() == Op_CompareAndExchangeP || 173 n->Opcode() == Op_GetAndSetN || 174 n->Opcode() == Op_CompareAndExchangeN) { 175 return true; 176 } 177 if (n->Opcode() == Op_DecodeN || 178 n->Opcode() == Op_EncodeP) { 179 return needs_barrier_impl(phase, orig, n->in(1), rb_mem, allow_fromspace, visited); 180 } 181 182 #ifdef ASSERT 183 tty->print("need barrier on?: "); n->dump(); 184 ShouldNotReachHere(); 185 #endif 186 return true; 187 } 188 189 bool ShenandoahReadBarrierNode::dominates_memory_rb_impl(PhaseGVN* phase, 190 Node* b1, 191 Node* b2, 192 Node* current, 193 bool linear) { 194 ResourceMark rm; 195 VectorSet visited(Thread::current()->resource_area()); 196 Node_Stack phis(0); 197 198 for(int i = 0; i < 10; i++) { 199 if (current == NULL) { 200 return false; 201 } else if (visited.test_set(current->_idx) || current->is_top() || current == b1) { 202 current = NULL; 203 while (phis.is_nonempty() && current == NULL) { 204 uint idx = phis.index(); 205 Node* phi = phis.node(); 206 if (idx >= phi->req()) { 207 phis.pop(); 208 } else { 209 current = phi->in(idx); 210 phis.set_index(idx+1); 211 } 212 } 213 if (current == NULL) { 214 return true; 215 } 216 } else if (current == 
phase->C->immutable_memory()) { 217 return false; 218 } else if (current->isa_Phi()) { 219 if (!linear) { 220 return false; 221 } 222 phis.push(current, 2); 223 current = current->in(1); 224 } else if (current->Opcode() == Op_ShenandoahWriteBarrier) { 225 const Type* in_type = current->bottom_type(); 226 const Type* this_type = b2->bottom_type(); 227 if (is_independent(in_type, this_type)) { 228 current = current->in(Memory); 229 } else { 230 return false; 231 } 232 } else if (current->Opcode() == Op_ShenandoahWBMemProj) { 233 current = current->in(ShenandoahWBMemProjNode::WriteBarrier); 234 } else if (current->is_Proj()) { 235 current = current->in(0); 236 } else if (current->is_Call()) { 237 return false; // TODO: Maybe improve by looking at the call's memory effects? 238 } else if (current->is_MemBar()) { 239 return false; // TODO: Do we need to stop at *any* membar? 240 } else if (current->is_MergeMem()) { 241 const TypePtr* adr_type = brooks_pointer_type(phase->type(b2)); 242 uint alias_idx = phase->C->get_alias_index(adr_type); 243 current = current->as_MergeMem()->memory_at(alias_idx); 244 } else { 245 #ifdef ASSERT 246 current->dump(); 247 #endif 248 ShouldNotReachHere(); 249 return false; 250 } 251 } 252 return false; 253 } 254 255 bool ShenandoahReadBarrierNode::is_independent(Node* mem) { 256 if (mem->is_Phi() || mem->is_Proj() || mem->is_MergeMem()) { 257 return true; 258 } else if (mem->Opcode() == Op_ShenandoahWBMemProj) { 259 return true; 260 } else if (mem->Opcode() == Op_ShenandoahWriteBarrier) { 261 const Type* mem_type = mem->bottom_type(); 262 const Type* this_type = bottom_type(); 263 if (is_independent(mem_type, this_type)) { 264 return true; 265 } else { 266 return false; 267 } 268 } else if (mem->is_Call() || mem->is_MemBar()) { 269 return false; 270 } 271 #ifdef ASSERT 272 mem->dump(); 273 #endif 274 ShouldNotReachHere(); 275 return true; 276 } 277 278 bool ShenandoahReadBarrierNode::dominates_memory_rb(PhaseGVN* phase, Node* b1, Node* b2, 
bool linear) { 279 return dominates_memory_rb_impl(phase, b1->in(Memory), b2, b2->in(Memory), linear); 280 } 281 282 bool ShenandoahReadBarrierNode::is_independent(const Type* in_type, const Type* this_type) { 283 assert(in_type->isa_oopptr(), "expect oop ptr"); 284 assert(this_type->isa_oopptr(), "expect oop ptr"); 285 286 ciKlass* in_kls = in_type->is_oopptr()->klass(); 287 ciKlass* this_kls = this_type->is_oopptr()->klass(); 288 if (in_kls != NULL && this_kls != NULL && 289 in_kls->is_loaded() && this_kls->is_loaded() && 290 (!in_kls->is_subclass_of(this_kls)) && 291 (!this_kls->is_subclass_of(in_kls))) { 292 return true; 293 } 294 return false; 295 } 296 297 Node* ShenandoahReadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) { 298 if (! can_reshape) { 299 return NULL; 300 } 301 302 if (in(Memory) == phase->C->immutable_memory()) return NULL; 303 304 // If memory input is a MergeMem, take the appropriate slice out of it. 305 Node* mem_in = in(Memory); 306 if (mem_in->isa_MergeMem()) { 307 const TypePtr* adr_type = brooks_pointer_type(bottom_type()); 308 uint alias_idx = phase->C->get_alias_index(adr_type); 309 mem_in = mem_in->as_MergeMem()->memory_at(alias_idx); 310 set_req(Memory, mem_in); 311 return this; 312 } 313 314 Node* input = in(Memory); 315 if (input->Opcode() == Op_ShenandoahWBMemProj) { 316 ResourceMark rm; 317 VectorSet seen(Thread::current()->resource_area()); 318 Node* n = in(Memory); 319 while (n->Opcode() == Op_ShenandoahWBMemProj && 320 n->in(ShenandoahWBMemProjNode::WriteBarrier) != NULL && 321 n->in(ShenandoahWBMemProjNode::WriteBarrier)->Opcode() == Op_ShenandoahWriteBarrier && 322 n->in(ShenandoahWBMemProjNode::WriteBarrier)->in(Memory) != NULL) { 323 if (seen.test_set(n->_idx)) { 324 return NULL; // loop 325 } 326 n = n->in(ShenandoahWBMemProjNode::WriteBarrier)->in(Memory); 327 } 328 329 Node* wb = input->in(ShenandoahWBMemProjNode::WriteBarrier); 330 const Type* in_type = phase->type(wb); 331 // is_top() test not sufficient here: 
we can come here after CCP 332 // in a dead branch of the graph that has not yet been removed. 333 if (in_type == Type::TOP) return NULL; // Dead path. 334 assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "expect write barrier"); 335 if (is_independent(in_type, _type)) { 336 phase->igvn_rehash_node_delayed(wb); 337 set_req(Memory, wb->in(Memory)); 338 if (can_reshape && input->outcnt() == 0) { 339 phase->is_IterGVN()->_worklist.push(input); 340 } 341 return this; 342 } 343 } 344 return NULL; 345 } 346 347 ShenandoahWriteBarrierNode::ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj) 348 : ShenandoahBarrierNode(ctrl, mem, obj, false) { 349 assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled"); 350 ShenandoahBarrierSetC2::bsc2()->state()->add_shenandoah_barrier(this); 351 } 352 353 Node* ShenandoahWriteBarrierNode::Identity(PhaseGVN* phase) { 354 assert(in(0) != NULL, "should have control"); 355 PhaseIterGVN* igvn = phase->is_IterGVN(); 356 Node* mem_in = in(Memory); 357 Node* mem_proj = NULL; 358 359 if (igvn != NULL) { 360 mem_proj = find_out_with(Op_ShenandoahWBMemProj); 361 if (mem_in == mem_proj) { 362 return this; 363 } 364 } 365 366 Node* replacement = Identity_impl(phase); 367 if (igvn != NULL) { 368 if (replacement != NULL && replacement != this && mem_proj != NULL) { 369 igvn->replace_node(mem_proj, mem_in); 370 } 371 } 372 return replacement; 373 } 374 375 Node* ShenandoahWriteBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) { 376 assert(in(0) != NULL, "should have control"); 377 if (!can_reshape) { 378 return NULL; 379 } 380 381 Node* mem_in = in(Memory); 382 383 if (mem_in->isa_MergeMem()) { 384 const TypePtr* adr_type = brooks_pointer_type(bottom_type()); 385 uint alias_idx = phase->C->get_alias_index(adr_type); 386 mem_in = mem_in->as_MergeMem()->memory_at(alias_idx); 387 set_req(Memory, mem_in); 388 return this; 389 } 390 391 Node* val = in(ValueIn); 392 if (val->is_ShenandoahBarrier()) { 393 
set_req(ValueIn, val->in(ValueIn)); 394 return this; 395 } 396 397 return NULL; 398 } 399 400 bool ShenandoahWriteBarrierNode::expand(Compile* C, PhaseIterGVN& igvn, int& loop_opts_cnt) { 401 if (UseShenandoahGC) { 402 if (ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() > 0 || (!ShenandoahWriteBarrier && ShenandoahStoreValEnqueueBarrier)) { 403 bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion; 404 C->clear_major_progress(); 405 PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand); 406 if (C->failing()) return false; 407 PhaseIdealLoop::verify(igvn); 408 DEBUG_ONLY(ShenandoahBarrierNode::verify_raw_mem(C->root());) 409 if (attempt_more_loopopts) { 410 C->set_major_progress(); 411 if (!C->optimize_loops(loop_opts_cnt, igvn, LoopOptsShenandoahPostExpand)) { 412 return false; 413 } 414 C->clear_major_progress(); 415 } 416 } 417 } 418 return true; 419 } 420 421 bool ShenandoahWriteBarrierNode::is_heap_state_test(Node* iff, int mask) { 422 if (!UseShenandoahGC) { 423 return false; 424 } 425 assert(iff->is_If(), "bad input"); 426 if (iff->Opcode() != Op_If) { 427 return false; 428 } 429 Node* bol = iff->in(1); 430 if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) { 431 return false; 432 } 433 Node* cmp = bol->in(1); 434 if (cmp->Opcode() != Op_CmpI) { 435 return false; 436 } 437 Node* in1 = cmp->in(1); 438 Node* in2 = cmp->in(2); 439 if (in2->find_int_con(-1) != 0) { 440 return false; 441 } 442 if (in1->Opcode() != Op_AndI) { 443 return false; 444 } 445 in2 = in1->in(2); 446 if (in2->find_int_con(-1) != mask) { 447 return false; 448 } 449 in1 = in1->in(1); 450 451 return is_gc_state_load(in1); 452 } 453 454 bool ShenandoahWriteBarrierNode::is_heap_stable_test(Node* iff) { 455 return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED); 456 } 457 458 bool ShenandoahWriteBarrierNode::is_gc_state_load(Node *n) { 459 if (!UseShenandoahGC) { 460 return false; 461 } 462 if (n->Opcode() != Op_LoadB && n->Opcode() != 
Op_LoadUB) { 463 return false; 464 } 465 Node* addp = n->in(MemNode::Address); 466 if (!addp->is_AddP()) { 467 return false; 468 } 469 Node* base = addp->in(AddPNode::Address); 470 Node* off = addp->in(AddPNode::Offset); 471 if (base->Opcode() != Op_ThreadLocal) { 472 return false; 473 } 474 if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) { 475 return false; 476 } 477 return true; 478 } 479 480 bool ShenandoahWriteBarrierNode::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) { 481 assert(phase->is_dominator(stop, start), "bad inputs"); 482 ResourceMark rm; 483 Unique_Node_List wq; 484 wq.push(start); 485 for (uint next = 0; next < wq.size(); next++) { 486 Node *m = wq.at(next); 487 if (m == stop) { 488 continue; 489 } 490 if (m->is_SafePoint() && !m->is_CallLeaf()) { 491 return true; 492 } 493 if (m->is_Region()) { 494 for (uint i = 1; i < m->req(); i++) { 495 wq.push(m->in(i)); 496 } 497 } else { 498 wq.push(m->in(0)); 499 } 500 } 501 return false; 502 } 503 504 bool ShenandoahWriteBarrierNode::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) { 505 assert(is_gc_state_load(n), "inconsistent"); 506 Node* addp = n->in(MemNode::Address); 507 Node* dominator = NULL; 508 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 509 Node* u = addp->fast_out(i); 510 assert(is_gc_state_load(u), "inconsistent"); 511 if (u != n && phase->is_dominator(u->in(0), n->in(0))) { 512 if (dominator == NULL) { 513 dominator = u; 514 } else { 515 if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) { 516 dominator = u; 517 } 518 } 519 } 520 } 521 if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) { 522 return false; 523 } 524 phase->igvn().replace_node(n, dominator); 525 526 return true; 527 } 528 529 bool ShenandoahBarrierNode::dominates_memory_impl(PhaseGVN* phase, 530 Node* b1, 531 Node* b2, 532 Node* current, 533 bool linear) { 534 ResourceMark rm; 
535 VectorSet visited(Thread::current()->resource_area()); 536 Node_Stack phis(0); 537 538 for(int i = 0; i < 10; i++) { 539 if (current == NULL) { 540 return false; 541 } else if (visited.test_set(current->_idx) || current->is_top() || current == b1) { 542 current = NULL; 543 while (phis.is_nonempty() && current == NULL) { 544 uint idx = phis.index(); 545 Node* phi = phis.node(); 546 if (idx >= phi->req()) { 547 phis.pop(); 548 } else { 549 current = phi->in(idx); 550 phis.set_index(idx+1); 551 } 552 } 553 if (current == NULL) { 554 return true; 555 } 556 } else if (current == b2) { 557 return false; 558 } else if (current == phase->C->immutable_memory()) { 559 return false; 560 } else if (current->isa_Phi()) { 561 if (!linear) { 562 return false; 563 } 564 phis.push(current, 2); 565 current = current->in(1); 566 } else if (current->Opcode() == Op_ShenandoahWriteBarrier) { 567 current = current->in(Memory); 568 } else if (current->Opcode() == Op_ShenandoahWBMemProj) { 569 current = current->in(ShenandoahWBMemProjNode::WriteBarrier); 570 } else if (current->is_Proj()) { 571 current = current->in(0); 572 } else if (current->is_Call()) { 573 current = current->in(TypeFunc::Memory); 574 } else if (current->is_MemBar()) { 575 current = current->in(TypeFunc::Memory); 576 } else if (current->is_MergeMem()) { 577 const TypePtr* adr_type = brooks_pointer_type(phase->type(b2)); 578 uint alias_idx = phase->C->get_alias_index(adr_type); 579 current = current->as_MergeMem()->memory_at(alias_idx); 580 } else { 581 #ifdef ASSERT 582 current->dump(); 583 #endif 584 ShouldNotReachHere(); 585 return false; 586 } 587 } 588 return false; 589 } 590 591 /** 592 * Determines if b1 dominates b2 through memory inputs. It returns true if: 593 * - b1 can be reached by following each branch in b2's memory input (through phis, etc) 594 * - or we get back to b2 (i.e. 
through a loop) without seeing b1 595 * In all other cases, (in particular, if we reach immutable_memory without having seen b1) 596 * we return false. 597 */ 598 bool ShenandoahBarrierNode::dominates_memory(PhaseGVN* phase, Node* b1, Node* b2, bool linear) { 599 return dominates_memory_impl(phase, b1, b2, b2->in(Memory), linear); 600 } 601 602 Node* ShenandoahBarrierNode::Identity_impl(PhaseGVN* phase) { 603 Node* n = in(ValueIn); 604 605 Node* rb_mem = Opcode() == Op_ShenandoahReadBarrier ? in(Memory) : NULL; 606 if (! needs_barrier(phase, this, n, rb_mem, _allow_fromspace)) { 607 return n; 608 } 609 610 // Try to find a write barrier sibling with identical inputs that we can fold into. 611 for (DUIterator i = n->outs(); n->has_out(i); i++) { 612 Node* sibling = n->out(i); 613 if (sibling == this) { 614 continue; 615 } 616 if (sibling->Opcode() != Op_ShenandoahWriteBarrier) { 617 continue; 618 } 619 620 assert(sibling->in(ValueIn) == in(ValueIn), "sanity"); 621 assert(sibling->Opcode() == Op_ShenandoahWriteBarrier, "sanity"); 622 623 if (dominates_memory(phase, sibling, this, phase->is_IterGVN() == NULL)) { 624 return sibling; 625 } 626 } 627 return this; 628 } 629 630 #ifndef PRODUCT 631 void ShenandoahBarrierNode::dump_spec(outputStream *st) const { 632 const TypePtr* adr = adr_type(); 633 if (adr == NULL) { 634 return; 635 } 636 st->print(" @"); 637 adr->dump_on(st); 638 st->print(" ("); 639 Compile::current()->alias_type(adr)->adr_type()->dump_on(st); 640 st->print(") "); 641 } 642 #endif 643 644 Node* ShenandoahReadBarrierNode::Identity(PhaseGVN* phase) { 645 Node* id = Identity_impl(phase); 646 647 if (id == this && phase->is_IterGVN()) { 648 Node* n = in(ValueIn); 649 // No success in super call. Try to combine identical read barriers. 
650 for (DUIterator i = n->outs(); n->has_out(i); i++) { 651 Node* sibling = n->out(i); 652 if (sibling == this || sibling->Opcode() != Op_ShenandoahReadBarrier) { 653 continue; 654 } 655 assert(sibling->in(ValueIn) == in(ValueIn), "sanity"); 656 if (phase->is_IterGVN()->hash_find(sibling) && 657 sibling->bottom_type() == bottom_type() && 658 sibling->in(Control) == in(Control) && 659 dominates_memory_rb(phase, sibling, this, phase->is_IterGVN() == NULL)) { 660 return sibling; 661 } 662 } 663 } 664 return id; 665 } 666 667 const Type* ShenandoahBarrierNode::Value(PhaseGVN* phase) const { 668 // Either input is TOP ==> the result is TOP 669 const Type *t1 = phase->type(in(Memory)); 670 if (t1 == Type::TOP) return Type::TOP; 671 const Type *t2 = phase->type(in(ValueIn)); 672 if( t2 == Type::TOP ) return Type::TOP; 673 674 if (t2 == TypePtr::NULL_PTR) { 675 return _type; 676 } 677 678 const Type* type = t2->is_oopptr()->cast_to_nonconst(); 679 return type; 680 } 681 682 uint ShenandoahBarrierNode::hash() const { 683 return TypeNode::hash() + _allow_fromspace; 684 } 685 686 uint ShenandoahBarrierNode::cmp(const Node& n) const { 687 return _allow_fromspace == ((ShenandoahBarrierNode&) n)._allow_fromspace 688 && TypeNode::cmp(n); 689 } 690 691 uint ShenandoahBarrierNode::size_of() const { 692 return sizeof(*this); 693 } 694 695 Node* ShenandoahWBMemProjNode::Identity(PhaseGVN* phase) { 696 Node* wb = in(WriteBarrier); 697 if (wb->is_top()) return phase->C->top(); // Dead path. 698 699 assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "expect write barrier"); 700 PhaseIterGVN* igvn = phase->is_IterGVN(); 701 // We can't do the below unless the graph is fully constructed. 702 if (igvn == NULL) { 703 return this; 704 } 705 706 // If the mem projection has no barrier users, it's not needed anymore. 
707 if (wb->outcnt() == 1) { 708 return wb->in(ShenandoahBarrierNode::Memory); 709 } 710 711 return this; 712 } 713 714 #ifdef ASSERT 715 bool ShenandoahBarrierNode::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) { 716 assert(phis.size() == 0, ""); 717 718 while (true) { 719 if (in->bottom_type() == TypePtr::NULL_PTR) { 720 if (trace) {tty->print_cr("NULL");} 721 } else if (!in->bottom_type()->make_ptr()->make_oopptr()) { 722 if (trace) {tty->print_cr("Non oop");} 723 } else if (t == ShenandoahLoad && ShenandoahOptimizeStableFinals && 724 in->bottom_type()->make_ptr()->isa_aryptr() && 725 in->bottom_type()->make_ptr()->is_aryptr()->is_stable()) { 726 if (trace) {tty->print_cr("Stable array load");} 727 } else { 728 if (in->is_ConstraintCast()) { 729 in = in->in(1); 730 continue; 731 } else if (in->is_AddP()) { 732 assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access"); 733 in = in->in(AddPNode::Address); 734 continue; 735 } else if (in->is_Con()) { 736 if (trace) {tty->print("Found constant"); in->dump();} 737 } else if (in->is_ShenandoahBarrier()) { 738 if (t == ShenandoahOopStore) { 739 if (in->Opcode() != Op_ShenandoahWriteBarrier) { 740 return false; 741 } 742 uint i = 0; 743 for (; i < phis.size(); i++) { 744 Node* n = phis.node_at(i); 745 if (n->Opcode() == Op_ShenandoahEnqueueBarrier) { 746 break; 747 } 748 } 749 if (i == phis.size()) { 750 return false; 751 } 752 } else if (t == ShenandoahStore && in->Opcode() != Op_ShenandoahWriteBarrier) { 753 return false; 754 } 755 barriers_used.push(in); 756 if (trace) {tty->print("Found barrier"); in->dump();} 757 } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) { 758 if (t != ShenandoahOopStore) { 759 return false; 760 } 761 if (trace) {tty->print("Found enqueue barrier"); in->dump();} 762 phis.push(in, in->req()); 763 in = in->in(1); 764 continue; 765 } else if (in->is_Proj() && in->in(0)->is_Allocate()) { 766 if (trace) 
{tty->print("Found alloc"); in->in(0)->dump();} 767 } else if (in->is_Phi()) { 768 if (!visited.test_set(in->_idx)) { 769 if (trace) {tty->print("Pushed phi:"); in->dump();} 770 phis.push(in, 2); 771 in = in->in(1); 772 continue; 773 } 774 if (trace) {tty->print("Already seen phi:"); in->dump();} 775 } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) { 776 if (!visited.test_set(in->_idx)) { 777 if (trace) {tty->print("Pushed cmovep:"); in->dump();} 778 phis.push(in, CMoveNode::IfTrue); 779 in = in->in(CMoveNode::IfFalse); 780 continue; 781 } 782 if (trace) {tty->print("Already seen cmovep:"); in->dump();} 783 } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) { 784 in = in->in(1); 785 continue; 786 } else { 787 return false; 788 } 789 } 790 bool cont = false; 791 while (phis.is_nonempty()) { 792 uint idx = phis.index(); 793 Node* phi = phis.node(); 794 if (idx >= phi->req()) { 795 if (trace) {tty->print("Popped phi:"); phi->dump();} 796 phis.pop(); 797 continue; 798 } 799 if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();} 800 in = phi->in(idx); 801 phis.set_index(idx+1); 802 cont = true; 803 break; 804 } 805 if (!cont) { 806 break; 807 } 808 } 809 return true; 810 } 811 812 void ShenandoahBarrierNode::report_verify_failure(const char *msg, Node *n1, Node *n2) { 813 if (n1 != NULL) { 814 n1->dump(+10); 815 } 816 if (n2 != NULL) { 817 n2->dump(+10); 818 } 819 fatal("%s", msg); 820 } 821 822 void ShenandoahBarrierNode::verify(RootNode* root) { 823 ResourceMark rm; 824 Unique_Node_List wq; 825 GrowableArray<Node*> barriers; 826 Unique_Node_List barriers_used; 827 Node_Stack phis(0); 828 VectorSet visited(Thread::current()->resource_area()); 829 const bool trace = false; 830 const bool verify_no_useless_barrier = false; 831 832 wq.push(root); 833 for (uint next = 0; next < wq.size(); next++) { 834 Node *n = wq.at(next); 835 if (n->is_Load()) { 836 const bool trace = false; 837 if (trace) {tty->print("Verifying"); 
n->dump();} 838 if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) { 839 if (trace) {tty->print_cr("Load range/klass");} 840 } else { 841 const TypePtr* adr_type = n->as_Load()->adr_type(); 842 843 if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) { 844 if (trace) {tty->print_cr("Mark load");} 845 } else if (adr_type->isa_instptr() && 846 adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) && 847 adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) { 848 if (trace) {tty->print_cr("Reference.get()");} 849 } else { 850 bool verify = true; 851 if (adr_type->isa_instptr()) { 852 const TypeInstPtr* tinst = adr_type->is_instptr(); 853 ciKlass* k = tinst->klass(); 854 assert(k->is_instance_klass(), ""); 855 ciInstanceKlass* ik = (ciInstanceKlass*)k; 856 int offset = adr_type->offset(); 857 858 if ((ik->debug_final_field_at(offset) && ShenandoahOptimizeInstanceFinals) || 859 (ik->debug_stable_field_at(offset) && ShenandoahOptimizeStableFinals)) { 860 if (trace) {tty->print_cr("Final/stable");} 861 verify = false; 862 } else if (k == ciEnv::current()->Class_klass() && 863 tinst->const_oop() != NULL && 864 tinst->offset() >= (ik->size_helper() * wordSize)) { 865 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); 866 ciField* field = k->get_field_by_offset(tinst->offset(), true); 867 if ((ShenandoahOptimizeStaticFinals && field->is_final()) || 868 (ShenandoahOptimizeStableFinals && field->is_stable())) { 869 verify = false; 870 } 871 } 872 } 873 874 if (verify && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) { 875 report_verify_failure("Shenandoah verification: Load should have barriers", n); 876 } 877 } 878 } 879 } else if (n->is_Store()) { 880 const bool trace = false; 881 882 if (trace) 
{tty->print("Verifying"); n->dump();} 883 if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) { 884 Node* adr = n->in(MemNode::Address); 885 bool verify = true; 886 887 if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) { 888 adr = adr->in(AddPNode::Address); 889 if (adr->is_AddP()) { 890 assert(adr->in(AddPNode::Base)->is_top(), ""); 891 adr = adr->in(AddPNode::Address); 892 if (adr->Opcode() == Op_LoadP && 893 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() && 894 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal && 895 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) { 896 if (trace) {tty->print_cr("SATB prebarrier");} 897 verify = false; 898 } 899 } 900 } 901 902 if (verify && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { 903 report_verify_failure("Shenandoah verification: Store should have barriers", n); 904 } 905 } 906 if (!ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { 907 report_verify_failure("Shenandoah verification: Store (address) should have barriers", n); 908 } 909 } else if (n->Opcode() == Op_CmpP) { 910 const bool trace = false; 911 912 Node* in1 = n->in(1); 913 Node* in2 = n->in(2); 914 if (in1->bottom_type()->isa_oopptr()) { 915 if (trace) {tty->print("Verifying"); n->dump();} 916 917 bool mark_inputs = false; 918 if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR || 919 (in1->is_Con() || in2->is_Con())) { 920 if (trace) {tty->print_cr("Comparison against a constant");} 921 mark_inputs = true; 922 } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) || 923 (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && 
in2->in(1)->in(0)->is_Allocate())) { 924 if (trace) {tty->print_cr("Comparison with newly alloc'ed object");} 925 mark_inputs = true; 926 } else { 927 assert(in2->bottom_type()->isa_oopptr(), ""); 928 929 if (!ShenandoahBarrierNode::verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) || 930 !ShenandoahBarrierNode::verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) { 931 report_verify_failure("Shenandoah verification: Cmp should have barriers", n); 932 } 933 } 934 if (verify_no_useless_barrier && 935 mark_inputs && 936 (!ShenandoahBarrierNode::verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) || 937 !ShenandoahBarrierNode::verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) { 938 phis.clear(); 939 visited.Reset(); 940 } 941 } 942 } else if (n->is_LoadStore()) { 943 if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() && 944 !ShenandoahBarrierNode::verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? 
ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { 945 report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n); 946 } 947 948 if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { 949 report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n); 950 } 951 } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) { 952 CallNode* call = n->as_Call(); 953 954 static struct { 955 const char* name; 956 struct { 957 int pos; 958 verify_type t; 959 } args[6]; 960 } calls[] = { 961 "aescrypt_encryptBlock", 962 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 963 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 964 "aescrypt_decryptBlock", 965 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 966 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 967 "multiplyToLen", 968 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore }, 969 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 970 "squareToLen", 971 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone}, 972 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 973 "montgomery_multiply", 974 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, 975 { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 976 "montgomery_square", 977 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore }, 978 { -1, ShenandoahNone}, { -1, 
ShenandoahNone}, { -1, ShenandoahNone} }, 979 "mulAdd", 980 { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, 981 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 982 "vectorizedMismatch", 983 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, 984 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 985 "updateBytesCRC32", 986 { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, 987 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 988 "updateBytesAdler32", 989 { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, 990 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 991 "updateBytesCRC32C", 992 { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone}, 993 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 994 "counterMode_AESCrypt", 995 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 996 { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } }, 997 "cipherBlockChaining_encryptAESCrypt", 998 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 999 { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1000 "cipherBlockChaining_decryptAESCrypt", 1001 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 1002 { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1003 "shenandoah_clone_barrier", 1004 { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, 1005 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { 
-1, ShenandoahNone} }, 1006 "ghash_processBlocks", 1007 { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, 1008 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1009 "sha1_implCompress", 1010 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 1011 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1012 "sha256_implCompress", 1013 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 1014 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1015 "sha512_implCompress", 1016 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 1017 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1018 "sha1_implCompressMB", 1019 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 1020 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1021 "sha256_implCompressMB", 1022 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 1023 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1024 "sha512_implCompressMB", 1025 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 1026 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1027 "encodeBlock", 1028 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone }, 1029 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 1030 }; 1031 1032 if (call->is_call_to_arraycopystub()) { 1033 Node* dest = NULL; 1034 const TypeTuple* args = n->as_Call()->_tf->domain(); 1035 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) { 1036 if (args->field_at(i)->isa_ptr()) { 1037 j++; 1038 if (j == 
2) { 1039 dest = n->in(i); 1040 break; 1041 } 1042 } 1043 } 1044 if (!ShenandoahBarrierNode::verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) || 1045 !ShenandoahBarrierNode::verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) { 1046 report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n); 1047 } 1048 } else if (strlen(call->_name) > 5 && 1049 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) { 1050 if (!ShenandoahBarrierNode::verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) { 1051 report_verify_failure("Shenandoah verification: _fill should have barriers", n); 1052 } 1053 } else if (!strcmp(call->_name, "shenandoah_wb_pre")) { 1054 // skip 1055 } else { 1056 const int calls_len = sizeof(calls) / sizeof(calls[0]); 1057 int i = 0; 1058 for (; i < calls_len; i++) { 1059 if (!strcmp(calls[i].name, call->_name)) { 1060 break; 1061 } 1062 } 1063 if (i != calls_len) { 1064 const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]); 1065 for (uint j = 0; j < args_len; j++) { 1066 int pos = calls[i].args[j].pos; 1067 if (pos == -1) { 1068 break; 1069 } 1070 if (!ShenandoahBarrierNode::verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) { 1071 report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); 1072 } 1073 } 1074 for (uint j = TypeFunc::Parms; j < call->req(); j++) { 1075 if (call->in(j)->bottom_type()->make_ptr() && 1076 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) { 1077 uint k = 0; 1078 for (; k < args_len && calls[i].args[k].pos != (int)j; k++); 1079 if (k == args_len) { 1080 fatal("arg %d for call %s not covered", j, call->_name); 1081 } 1082 } 1083 } 1084 } else { 1085 for (uint j = TypeFunc::Parms; j < call->req(); j++) { 1086 if (call->in(j)->bottom_type()->make_ptr() && 1087 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) { 
1088 fatal("%s not covered", call->_name); 1089 } 1090 } 1091 } 1092 } 1093 } else if (n->is_ShenandoahBarrier()) { 1094 assert(!barriers.contains(n), ""); 1095 assert(n->Opcode() != Op_ShenandoahWriteBarrier || n->find_out_with(Op_ShenandoahWBMemProj) != NULL, "bad shenandoah write barrier"); 1096 assert(n->Opcode() != Op_ShenandoahWriteBarrier || n->outcnt() > 1, "bad shenandoah write barrier"); 1097 barriers.push(n); 1098 } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier) { 1099 // skip 1100 } else if (n->Opcode() == Op_ShenandoahWBMemProj) { 1101 assert(n->in(0) == NULL && n->in(ShenandoahWBMemProjNode::WriteBarrier)->Opcode() == Op_ShenandoahWriteBarrier, "strange ShenandoahWBMemProj"); 1102 } else if (n->is_AddP() 1103 || n->is_Phi() 1104 || n->is_ConstraintCast() 1105 || n->Opcode() == Op_Return 1106 || n->Opcode() == Op_CMoveP 1107 || n->Opcode() == Op_CMoveN 1108 || n->Opcode() == Op_Rethrow 1109 || n->is_MemBar() 1110 || n->Opcode() == Op_Conv2B 1111 || n->Opcode() == Op_SafePoint 1112 || n->is_CallJava() 1113 || n->Opcode() == Op_Unlock 1114 || n->Opcode() == Op_EncodeP 1115 || n->Opcode() == Op_DecodeN) { 1116 // nothing to do 1117 } else { 1118 static struct { 1119 int opcode; 1120 struct { 1121 int pos; 1122 verify_type t; 1123 } inputs[2]; 1124 } others[] = { 1125 Op_FastLock, 1126 { { 1, ShenandoahLoad }, { -1, ShenandoahNone} }, 1127 Op_Lock, 1128 { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} }, 1129 Op_ArrayCopy, 1130 { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } }, 1131 Op_StrCompressedCopy, 1132 { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, 1133 Op_StrInflatedCopy, 1134 { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, 1135 Op_AryEq, 1136 { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } }, 1137 Op_StrIndexOf, 1138 { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } }, 1139 Op_StrComp, 1140 { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } }, 1141 Op_StrEquals, 1142 { { 2, ShenandoahLoad }, { 3, 
ShenandoahLoad } }, 1143 Op_EncodeISOArray, 1144 { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, 1145 Op_HasNegatives, 1146 { { 2, ShenandoahLoad }, { -1, ShenandoahNone} }, 1147 Op_CastP2X, 1148 { { 1, ShenandoahLoad }, { -1, ShenandoahNone} }, 1149 Op_StrIndexOfChar, 1150 { { 2, ShenandoahLoad }, { -1, ShenandoahNone } }, 1151 }; 1152 1153 const int others_len = sizeof(others) / sizeof(others[0]); 1154 int i = 0; 1155 for (; i < others_len; i++) { 1156 if (others[i].opcode == n->Opcode()) { 1157 break; 1158 } 1159 } 1160 uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req(); 1161 if (i != others_len) { 1162 const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]); 1163 for (uint j = 0; j < inputs_len; j++) { 1164 int pos = others[i].inputs[j].pos; 1165 if (pos == -1) { 1166 break; 1167 } 1168 if (!ShenandoahBarrierNode::verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) { 1169 report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); 1170 } 1171 } 1172 for (uint j = 1; j < stop; j++) { 1173 if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() && 1174 n->in(j)->bottom_type()->make_ptr()->make_oopptr()) { 1175 uint k = 0; 1176 for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++); 1177 if (k == inputs_len) { 1178 fatal("arg %d for node %s not covered", j, n->Name()); 1179 } 1180 } 1181 } 1182 } else { 1183 for (uint j = 1; j < stop; j++) { 1184 if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() && 1185 n->in(j)->bottom_type()->make_ptr()->make_oopptr()) { 1186 fatal("%s not covered", n->Name()); 1187 } 1188 } 1189 } 1190 } 1191 1192 if (n->is_SafePoint()) { 1193 SafePointNode* sfpt = n->as_SafePoint(); 1194 if (verify_no_useless_barrier && sfpt->jvms() != NULL) { 1195 for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) { 1196 if (!ShenandoahBarrierNode::verify_helper(sfpt->in(i), phis, visited, 
ShenandoahLoad, trace, barriers_used)) { 1197 phis.clear(); 1198 visited.Reset(); 1199 } 1200 } 1201 } 1202 } 1203 for( uint i = 0; i < n->len(); ++i ) { 1204 Node *m = n->in(i); 1205 if (m == NULL) continue; 1206 1207 // In most cases, inputs should be known to be non null. If it's 1208 // not the case, it could be a missing cast_not_null() in an 1209 // intrinsic or support might be needed in AddPNode::Ideal() to 1210 // avoid a NULL+offset input. 1211 if (!(n->is_Phi() || 1212 (n->is_SafePoint() && (!n->is_CallRuntime() || !strcmp(n->as_Call()->_name, "shenandoah_wb_pre") || !strcmp(n->as_Call()->_name, "unsafe_arraycopy"))) || 1213 n->Opcode() == Op_CmpP || 1214 n->Opcode() == Op_CmpN || 1215 (n->Opcode() == Op_StoreP && i == StoreNode::ValueIn) || 1216 (n->Opcode() == Op_StoreN && i == StoreNode::ValueIn) || 1217 n->is_ConstraintCast() || 1218 n->Opcode() == Op_Return || 1219 n->Opcode() == Op_Conv2B || 1220 n->is_AddP() || 1221 n->Opcode() == Op_CMoveP || 1222 n->Opcode() == Op_CMoveN || 1223 n->Opcode() == Op_Rethrow || 1224 n->is_MemBar() || 1225 n->is_Mem() || 1226 n->Opcode() == Op_AryEq || 1227 n->Opcode() == Op_SCMemProj || 1228 n->Opcode() == Op_EncodeP || 1229 n->Opcode() == Op_DecodeN || 1230 n->Opcode() == Op_ShenandoahWriteBarrier || 1231 n->Opcode() == Op_ShenandoahWBMemProj || 1232 n->Opcode() == Op_ShenandoahEnqueueBarrier)) { 1233 if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) { 1234 report_verify_failure("Shenandoah verification: null input", n, m); 1235 } 1236 } 1237 1238 wq.push(m); 1239 } 1240 } 1241 1242 if (verify_no_useless_barrier) { 1243 for (int i = 0; i < barriers.length(); i++) { 1244 Node* n = barriers.at(i); 1245 if (!barriers_used.member(n)) { 1246 tty->print("XXX useless barrier"); n->dump(-2); 1247 ShouldNotReachHere(); 1248 } 1249 } 1250 } 1251 } 1252 #endif 1253 1254 bool ShenandoahBarrierNode::is_dominator_same_ctrl(Node*c, Node* d, Node* n, 
PhaseIdealLoop* phase) { 1255 // That both nodes have the same control is not sufficient to prove 1256 // domination, verify that there's no path from d to n 1257 ResourceMark rm; 1258 Unique_Node_List wq; 1259 wq.push(d); 1260 for (uint next = 0; next < wq.size(); next++) { 1261 Node *m = wq.at(next); 1262 if (m == n) { 1263 return false; 1264 } 1265 if (m->is_Phi() && m->in(0)->is_Loop()) { 1266 assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control"); 1267 } else { 1268 for (uint i = 0; i < m->req(); i++) { 1269 if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) { 1270 wq.push(m->in(i)); 1271 } 1272 } 1273 } 1274 } 1275 return true; 1276 } 1277 1278 bool ShenandoahBarrierNode::is_dominator(Node *d_c, Node *n_c, Node* d, Node* n, PhaseIdealLoop* phase) { 1279 if (d_c != n_c) { 1280 return phase->is_dominator(d_c, n_c); 1281 } 1282 return is_dominator_same_ctrl(d_c, d, n, phase); 1283 } 1284 1285 Node* next_mem(Node* mem, int alias) { 1286 Node* res = NULL; 1287 if (mem->is_Proj()) { 1288 res = mem->in(0); 1289 } else if (mem->is_SafePoint() || mem->is_MemBar()) { 1290 res = mem->in(TypeFunc::Memory); 1291 } else if (mem->is_Phi()) { 1292 res = mem->in(1); 1293 } else if (mem->is_ShenandoahBarrier()) { 1294 res = mem->in(ShenandoahBarrierNode::Memory); 1295 } else if (mem->is_MergeMem()) { 1296 res = mem->as_MergeMem()->memory_at(alias); 1297 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) { 1298 assert(alias = Compile::AliasIdxRaw, "following raw memory can't lead to a barrier"); 1299 res = mem->in(MemNode::Memory); 1300 } else if (mem->Opcode() == Op_ShenandoahWBMemProj) { 1301 res = mem->in(ShenandoahWBMemProjNode::WriteBarrier); 1302 } else { 1303 #ifdef ASSERT 1304 mem->dump(); 1305 #endif 1306 ShouldNotReachHere(); 1307 } 1308 return res; 1309 } 1310 1311 Node* ShenandoahBarrierNode::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) { 1312 
Node* iffproj = NULL; 1313 while (c != dom) { 1314 Node* next = phase->idom(c); 1315 assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?"); 1316 if (c->is_Region()) { 1317 ResourceMark rm; 1318 Unique_Node_List wq; 1319 wq.push(c); 1320 for (uint i = 0; i < wq.size(); i++) { 1321 Node *n = wq.at(i); 1322 if (n == next) { 1323 continue; 1324 } 1325 if (n->is_Region()) { 1326 for (uint j = 1; j < n->req(); j++) { 1327 wq.push(n->in(j)); 1328 } 1329 } else { 1330 wq.push(n->in(0)); 1331 } 1332 } 1333 for (uint i = 0; i < wq.size(); i++) { 1334 Node *n = wq.at(i); 1335 assert(n->is_CFG(), ""); 1336 if (n->is_Multi()) { 1337 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 1338 Node* u = n->fast_out(j); 1339 if (u->is_CFG()) { 1340 if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) { 1341 return NodeSentinel; 1342 } 1343 } 1344 } 1345 } 1346 } 1347 } else if (c->is_Proj()) { 1348 if (c->is_IfProj()) { 1349 if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) { 1350 // continue; 1351 } else { 1352 if (!allow_one_proj) { 1353 return NodeSentinel; 1354 } 1355 if (iffproj == NULL) { 1356 iffproj = c; 1357 } else { 1358 return NodeSentinel; 1359 } 1360 } 1361 } else if (c->Opcode() == Op_JumpProj) { 1362 return NodeSentinel; // unsupported 1363 } else if (c->Opcode() == Op_CatchProj) { 1364 return NodeSentinel; // unsupported 1365 } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) { 1366 return NodeSentinel; // unsupported 1367 } else { 1368 assert(next->unique_ctrl_out() == c, "unsupported branch pattern"); 1369 } 1370 } 1371 c = next; 1372 } 1373 return iffproj; 1374 } 1375 1376 #ifdef ASSERT 1377 void ShenandoahWriteBarrierNode::memory_dominates_all_paths_helper(Node* c, Node* rep_ctrl, Unique_Node_List& controls, PhaseIdealLoop* phase) { 1378 const bool trace = false; 1379 if (trace) { 
tty->print("X control is"); c->dump(); }

  // Transitively collect the controls reachable (backward) from c, pushing
  // loop backedges when a loop is entered from below.
  uint start = controls.size();
  controls.push(c);
  for (uint i = start; i < controls.size(); i++) {
    Node *n = controls.at(i);

    if (trace) { tty->print("X from"); n->dump(); }

    if (n == rep_ctrl) {
      // Reached the replacement control: stop following this path.
      continue;
    }

    if (n->is_Proj()) {
      Node* n_dom = n->in(0);
      IdealLoopTree* n_dom_loop = phase->get_loop(n_dom);
      if (n->is_IfProj() && n_dom->outcnt() == 2) {
        // Use the loop of the other If projection — presumably to detect
        // entering a loop through the branch not taken here.
        n_dom_loop = phase->get_loop(n_dom->as_If()->proj_out(n->as_Proj()->_con == 0 ? 1 : 0));
      }
      if (n_dom_loop != phase->ltree_root()) {
        Node* tail = n_dom_loop->tail();
        if (tail->is_Region()) {
          for (uint j = 1; j < tail->req(); j++) {
            if (phase->is_dominator(n_dom, tail->in(j)) && !phase->is_dominator(n, tail->in(j))) {
              assert(phase->is_dominator(rep_ctrl, tail->in(j)), "why are we here?");
              // entering loop from below, mark backedge
              if (trace) { tty->print("X pushing backedge"); tail->in(j)->dump(); }
              controls.push(tail->in(j));
              //assert(n->in(0) == n_dom, "strange flow control");
            }
          }
        } else if (phase->get_loop(n) != n_dom_loop && phase->is_dominator(n_dom, tail)) {
          // entering loop from below, mark backedge
          if (trace) { tty->print("X pushing backedge"); tail->dump(); }
          controls.push(tail);
          //assert(n->in(0) == n_dom, "strange flow control");
        }
      }
    }

    // Follow the regular control inputs.
    if (n->is_Loop()) {
      Node* c = n->in(LoopNode::EntryControl);
      if (trace) { tty->print("X pushing"); c->dump(); }
      controls.push(c);
    } else if (n->is_Region()) {
      for (uint i = 1; i < n->req(); i++) {
        Node* c = n->in(i);
        if (trace) { tty->print("X pushing"); c->dump(); }
        controls.push(c);
      }
    } else {
      Node* c = n->in(0);
      if (trace) { tty->print("X pushing"); c->dump(); }
      controls.push(c);
    }
  }
}

// Debug-only check used by asserts: verifies that the memory state mem,
// placed at control rep_ctrl, covers every control path for the given alias
// — i.e. no path below rep_ctrl observes memory that bypasses mem.
bool ShenandoahWriteBarrierNode::memory_dominates_all_paths(Node* mem,
                                                            Node* rep_ctrl, int alias, PhaseIdealLoop* phase) {
  const bool trace = false;
  if (trace) {
    tty->print("XXX mem is"); mem->dump();
    tty->print("XXX rep ctrl is"); rep_ctrl->dump();
    tty->print_cr("XXX alias is %d", alias);
  }
  ResourceMark rm;
  Unique_Node_List wq;
  Unique_Node_List controls;
  wq.push(mem);
  // Phase 1: follow memory uses of mem forward, recording (via the helper)
  // all controls of memory consumers dominated by rep_ctrl.
  for (uint next = 0; next < wq.size(); next++) {
    Node *nn = wq.at(next);
    if (trace) { tty->print("XX from mem"); nn->dump(); }
    assert(nn->bottom_type() == Type::MEMORY, "memory only");

    if (nn->is_Phi()) {
      // Also visit sibling memory phis on the same region for this alias.
      Node* r = nn->in(0);
      for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
        Node* u = r->fast_out(j);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY && u != nn &&
            (u->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(u->adr_type()) == alias)) {
          if (trace) { tty->print("XX Next mem (other phi)"); u->dump(); }
          wq.push(u);
        }
      }
    }

    for (DUIterator_Fast imax, i = nn->fast_outs(imax); i < imax; i++) {
      Node* use = nn->fast_out(i);

      if (trace) { tty->print("XX use %p", use->adr_type()); use->dump(); }
      if (use->is_CFG() && use->in(TypeFunc::Memory) == nn) {
        Node* c = use->in(0);
        if (phase->is_dominator(rep_ctrl, c)) {
          memory_dominates_all_paths_helper(c, rep_ctrl, controls, phase);
        } else if (use->is_CallStaticJava() && use->as_CallStaticJava()->uncommon_trap_request() != 0 && c->is_Region()) {
          // Uncommon trap call on a region: check each region input
          // separately, since only some paths may be dominated.
          Node* region = c;
          if (trace) { tty->print("XX unc region"); region->dump(); }
          for (uint j = 1; j < region->req(); j++) {
            if (phase->is_dominator(rep_ctrl, region->in(j))) {
              if (trace) { tty->print("XX unc follows"); region->in(j)->dump(); }
              memory_dominates_all_paths_helper(region->in(j), rep_ctrl, controls, phase);
            }
          }
        }
        //continue;
      } else if (use->is_Phi()) {
        assert(use->bottom_type() == Type::MEMORY, "bad phi");
        // A phi use consumes mem on specific region inputs only; record
        // the matching region input controls.
        if ((use->adr_type() == TypePtr::BOTTOM) ||
            phase->C->get_alias_index(use->adr_type()) == alias) {
          for (uint j = 1; j < use->req(); j++) {
            if (use->in(j) == nn) {
              Node* c = use->in(0)->in(j);
              if (phase->is_dominator(rep_ctrl, c)) {
                memory_dominates_all_paths_helper(c, rep_ctrl, controls, phase);
              }
            }
          }
        }
        // continue;
      }

      // NOTE: this second chain intentionally re-tests is_Phi(); the first
      // chain records controls, this one decides whether to keep walking
      // the memory graph through the use.
      if (use->is_MergeMem()) {
        if (use->as_MergeMem()->memory_at(alias) == nn) {
          if (trace) { tty->print("XX Next mem"); use->dump(); }
          // follow the memory edges
          wq.push(use);
        }
      } else if (use->is_Phi()) {
        assert(use->bottom_type() == Type::MEMORY, "bad phi");
        if ((use->adr_type() == TypePtr::BOTTOM) ||
            phase->C->get_alias_index(use->adr_type()) == alias) {
          if (trace) { tty->print("XX Next mem"); use->dump(); }
          // follow the memory edges
          wq.push(use);
        }
      } else if (use->bottom_type() == Type::MEMORY &&
                 (use->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(use->adr_type()) == alias)) {
        if (trace) { tty->print("XX Next mem"); use->dump(); }
        // follow the memory edges
        wq.push(use);
      } else if ((use->is_SafePoint() || use->is_MemBar()) &&
                 (use->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(use->adr_type()) == alias)) {
        for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
          Node* u = use->fast_out(j);
          if (u->bottom_type() == Type::MEMORY) {
            if (trace) { tty->print("XX Next mem"); u->dump(); }
            // follow the memory edges
            wq.push(u);
          }
        }
      } else if (use->Opcode() == Op_ShenandoahWriteBarrier && phase->C->get_alias_index(use->adr_type()) == alias) {
        Node* m = use->find_out_with(Op_ShenandoahWBMemProj);
        if (m != NULL) {
          if (trace) { tty->print("XX Next mem"); m->dump(); }
          // follow the memory edges
          wq.push(m);
        }
      }
    }
  }

  if (controls.size() == 0) {
    // No dominated memory consumer found at all.
    return false;
  }

  // Phase 2: verify the collected control set is closed — every CFG
  // successor of a collected control is itself collected, or is an
  // uncommon trap / dead-end (Halt) path.
  for (uint i = 0; i < controls.size(); i++) {
    Node *n = controls.at(i);

    if (trace) { tty->print("X checking"); n->dump(); }

    if (n->unique_ctrl_out() != NULL) {
      // Straight-line control: nothing can escape here.
      continue;
    }

    if (n->Opcode() == Op_NeverBranch) {
      Node* taken = n->as_Multi()->proj_out(0);
      if (!controls.member(taken)) {
        if (trace) { tty->print("X not seen"); taken->dump(); }
        return false;
      }
      continue;
    }

    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* u = n->fast_out(j);

      if (u->is_CFG()) {
        if (!controls.member(u)) {
          if (u->is_Proj() && u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
            if (trace) { tty->print("X not seen but unc"); u->dump(); }
          } else {
            // Tolerate paths that lead straight to a Halt node.
            Node* c = u;
            do {
              c = c->unique_ctrl_out();
            } while (c != NULL && c->is_Region());
            if (c != NULL && c->Opcode() == Op_Halt) {
              if (trace) { tty->print("X not seen but halt"); c->dump(); }
            } else {
              if (trace) { tty->print("X not seen"); u->dump(); }
              return false;
            }
          }
        } else {
          if (trace) { tty->print("X seen"); u->dump(); }
        }
      }
    }
  }
  return true;
}
#endif

// Walks the memory graph up from mem until a state is found that dominates
// n at rep_ctrl (for the given alias). Returns NULL if the walk loops
// without finding one. mem_ctrl is set to the control of the result.
Node* ShenandoahBarrierNode::dom_mem(Node* mem, Node*& mem_ctrl, Node* n, Node* rep_ctrl, int alias, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq(Thread::current()->resource_area());
  wq.set(mem->_idx);
  mem_ctrl = phase->get_ctrl(mem);
  while (!is_dominator(mem_ctrl, rep_ctrl, mem, n, phase)) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return NULL; // hit an unexpected loop
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    // Narrow a MergeMem down to the slice for this alias.
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

Node* ShenandoahBarrierNode::dom_mem(Node* mem, Node* ctrl, int alias,
Node*& mem_ctrl, PhaseIdealLoop* phase) { 1611 ResourceMark rm; 1612 VectorSet wq(Thread::current()->resource_area()); 1613 wq.set(mem->_idx); 1614 mem_ctrl = phase->ctrl_or_self(mem); 1615 while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) { 1616 mem = next_mem(mem, alias); 1617 if (wq.test_set(mem->_idx)) { 1618 return NULL; 1619 } 1620 mem_ctrl = phase->ctrl_or_self(mem); 1621 } 1622 if (mem->is_MergeMem()) { 1623 mem = mem->as_MergeMem()->memory_at(alias); 1624 mem_ctrl = phase->ctrl_or_self(mem); 1625 } 1626 return mem; 1627 } 1628 1629 static void disconnect_barrier_mem(Node* wb, PhaseIterGVN& igvn) { 1630 Node* mem_in = wb->in(ShenandoahBarrierNode::Memory); 1631 Node* proj = wb->find_out_with(Op_ShenandoahWBMemProj); 1632 1633 for (DUIterator_Last imin, i = proj->last_outs(imin); i >= imin; ) { 1634 Node* u = proj->last_out(i); 1635 igvn.rehash_node_delayed(u); 1636 int nb = u->replace_edge(proj, mem_in); 1637 assert(nb > 0, "no replacement?"); 1638 i -= nb; 1639 } 1640 } 1641 1642 Node* ShenandoahWriteBarrierNode::move_above_predicates(LoopNode* cl, Node* val_ctrl, PhaseIdealLoop* phase) { 1643 Node* entry = cl->skip_strip_mined(-1)->in(LoopNode::EntryControl); 1644 Node* above_pred = phase->skip_all_loop_predicates(entry); 1645 Node* ctrl = entry; 1646 while (ctrl != above_pred) { 1647 Node* next = ctrl->in(0); 1648 if (!phase->is_dominator(val_ctrl, next)) { 1649 break; 1650 } 1651 ctrl = next; 1652 } 1653 return ctrl; 1654 } 1655 1656 static MemoryGraphFixer* find_fixer(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, int alias) { 1657 for (int i = 0; i < memory_graph_fixers.length(); i++) { 1658 if (memory_graph_fixers.at(i)->alias() == alias) { 1659 return memory_graph_fixers.at(i); 1660 } 1661 } 1662 return NULL; 1663 } 1664 1665 static MemoryGraphFixer* create_fixer(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, int alias, PhaseIdealLoop* phase, bool include_lsm) { 1666 assert(find_fixer(memory_graph_fixers, alias) == 
NULL, "none should exist yet"); 1667 MemoryGraphFixer* fixer = new MemoryGraphFixer(alias, include_lsm, phase); 1668 memory_graph_fixers.push(fixer); 1669 return fixer; 1670 } 1671 1672 void ShenandoahWriteBarrierNode::try_move_before_loop_helper(LoopNode* cl, Node* val_ctrl, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses) { 1673 assert(cl->is_Loop(), "bad control"); 1674 Node* ctrl = move_above_predicates(cl, val_ctrl, phase); 1675 Node* mem_ctrl = NULL; 1676 int alias = phase->C->get_alias_index(adr_type()); 1677 1678 MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias); 1679 if (fixer == NULL) { 1680 fixer = create_fixer(memory_graph_fixers, alias, phase, include_lsm); 1681 } 1682 1683 Node* proj = find_out_with(Op_ShenandoahWBMemProj); 1684 1685 fixer->remove(proj); 1686 Node* mem = fixer->find_mem(ctrl, NULL); 1687 1688 assert(!ShenandoahVerifyOptoBarriers || memory_dominates_all_paths(mem, ctrl, alias, phase), "can't fix the memory graph"); 1689 1690 phase->set_ctrl_and_loop(this, ctrl); 1691 phase->igvn().replace_input_of(this, Control, ctrl); 1692 1693 disconnect_barrier_mem(this, phase->igvn()); 1694 1695 phase->igvn().replace_input_of(this, Memory, mem); 1696 phase->set_ctrl_and_loop(proj, ctrl); 1697 1698 fixer->fix_mem(ctrl, ctrl, mem, mem, proj, uses); 1699 assert(proj->outcnt() > 0, "disconnected write barrier"); 1700 } 1701 1702 LoopNode* ShenandoahWriteBarrierNode::try_move_before_pre_loop(Node* c, Node* val_ctrl, PhaseIdealLoop* phase) { 1703 // A write barrier between a pre and main loop can get in the way of 1704 // vectorization. 
Move it above the pre loop if possible 1705 CountedLoopNode* cl = NULL; 1706 if (c->is_IfFalse() && 1707 c->in(0)->is_CountedLoopEnd()) { 1708 cl = c->in(0)->as_CountedLoopEnd()->loopnode(); 1709 } else if (c->is_IfProj() && 1710 c->in(0)->is_If() && 1711 c->in(0)->in(0)->is_IfFalse() && 1712 c->in(0)->in(0)->in(0)->is_CountedLoopEnd()) { 1713 cl = c->in(0)->in(0)->in(0)->as_CountedLoopEnd()->loopnode(); 1714 } 1715 if (cl != NULL && 1716 cl->is_pre_loop() && 1717 val_ctrl != cl && 1718 phase->is_dominator(val_ctrl, cl)) { 1719 return cl; 1720 } 1721 return NULL; 1722 } 1723 1724 void ShenandoahWriteBarrierNode::try_move_before_loop(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses) { 1725 Node *n_ctrl = phase->get_ctrl(this); 1726 IdealLoopTree *n_loop = phase->get_loop(n_ctrl); 1727 Node* val = in(ValueIn); 1728 Node* val_ctrl = phase->get_ctrl(val); 1729 if (n_loop != phase->ltree_root() && !n_loop->_irreducible) { 1730 IdealLoopTree *val_loop = phase->get_loop(val_ctrl); 1731 Node* mem = in(Memory); 1732 IdealLoopTree *mem_loop = phase->get_loop(phase->get_ctrl(mem)); 1733 if (!n_loop->is_member(val_loop) && 1734 n_loop->is_member(mem_loop)) { 1735 Node* n_loop_head = n_loop->_head; 1736 1737 if (n_loop_head->is_Loop()) { 1738 LoopNode* loop = n_loop_head->as_Loop(); 1739 if (n_loop_head->is_CountedLoop() && n_loop_head->as_CountedLoop()->is_main_loop()) { 1740 LoopNode* res = try_move_before_pre_loop(n_loop_head->in(LoopNode::EntryControl), val_ctrl, phase); 1741 if (res != NULL) { 1742 loop = res; 1743 } 1744 } 1745 1746 try_move_before_loop_helper(loop, val_ctrl, memory_graph_fixers, phase, include_lsm, uses); 1747 } 1748 } 1749 } 1750 LoopNode* ctrl = try_move_before_pre_loop(in(0), val_ctrl, phase); 1751 if (ctrl != NULL) { 1752 try_move_before_loop_helper(ctrl, val_ctrl, memory_graph_fixers, phase, include_lsm, uses); 1753 } 1754 } 1755 1756 Node* 
ShenandoahWriteBarrierNode::would_subsume(ShenandoahBarrierNode* other, PhaseIdealLoop* phase) { 1757 Node* val = in(ValueIn); 1758 Node* val_ctrl = phase->get_ctrl(val); 1759 Node* other_mem = other->in(Memory); 1760 Node* other_ctrl = phase->get_ctrl(other); 1761 Node* this_ctrl = phase->get_ctrl(this); 1762 IdealLoopTree* this_loop = phase->get_loop(this_ctrl); 1763 IdealLoopTree* other_loop = phase->get_loop(other_ctrl); 1764 1765 Node* ctrl = phase->dom_lca(other_ctrl, this_ctrl); 1766 1767 if (ctrl->is_Proj() && 1768 ctrl->in(0)->is_Call() && 1769 ctrl->unique_ctrl_out() != NULL && 1770 ctrl->unique_ctrl_out()->Opcode() == Op_Catch && 1771 !phase->is_dominator(val_ctrl, ctrl->in(0)->in(0))) { 1772 return NULL; 1773 } 1774 1775 IdealLoopTree* loop = phase->get_loop(ctrl); 1776 1777 // We don't want to move a write barrier in a loop 1778 // If the LCA is in a inner loop, try a control out of loop if possible 1779 while (!loop->is_member(this_loop) && (other->Opcode() != Op_ShenandoahWriteBarrier || !loop->is_member(other_loop))) { 1780 ctrl = phase->idom(ctrl); 1781 if (ctrl->is_MultiBranch()) { 1782 ctrl = ctrl->in(0); 1783 } 1784 if (ctrl != val_ctrl && phase->is_dominator(ctrl, val_ctrl)) { 1785 return NULL; 1786 } 1787 loop = phase->get_loop(ctrl); 1788 } 1789 1790 if (ShenandoahDontIncreaseWBFreq) { 1791 Node* this_iffproj = no_branches(this_ctrl, ctrl, true, phase); 1792 if (other->Opcode() == Op_ShenandoahWriteBarrier) { 1793 Node* other_iffproj = no_branches(other_ctrl, ctrl, true, phase); 1794 if (other_iffproj == NULL || this_iffproj == NULL) { 1795 return ctrl; 1796 } else if (other_iffproj != NodeSentinel && this_iffproj != NodeSentinel && 1797 other_iffproj->in(0) == this_iffproj->in(0)) { 1798 return ctrl; 1799 } 1800 } else if (this_iffproj == NULL) { 1801 return ctrl; 1802 } 1803 return NULL; 1804 } 1805 1806 return ctrl; 1807 } 1808 1809 void ShenandoahWriteBarrierNode::optimize_before_expansion(PhaseIdealLoop* phase, 
GrowableArray<MemoryGraphFixer*> memory_graph_fixers, bool include_lsm) { 1810 bool progress = false; 1811 Unique_Node_List uses; 1812 do { 1813 progress = false; 1814 for (int i = 0; i < ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i++) { 1815 ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i); 1816 1817 wb->try_move_before_loop(memory_graph_fixers, phase, include_lsm, uses); 1818 1819 Node* val = wb->in(ValueIn); 1820 1821 for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { 1822 Node* u = val->fast_out(j); 1823 if (u != wb && u->is_ShenandoahBarrier()) { 1824 Node* rep_ctrl = wb->would_subsume(u->as_ShenandoahBarrier(), phase); 1825 1826 if (rep_ctrl != NULL) { 1827 Node* other = u; 1828 Node* val_ctrl = phase->get_ctrl(val); 1829 if (rep_ctrl->is_Proj() && 1830 rep_ctrl->in(0)->is_Call() && 1831 rep_ctrl->unique_ctrl_out() != NULL && 1832 rep_ctrl->unique_ctrl_out()->Opcode() == Op_Catch) { 1833 rep_ctrl = rep_ctrl->in(0)->in(0); 1834 1835 assert(phase->is_dominator(val_ctrl, rep_ctrl), "bad control"); 1836 } else { 1837 LoopNode* c = ShenandoahWriteBarrierNode::try_move_before_pre_loop(rep_ctrl, val_ctrl, phase); 1838 if (c != NULL) { 1839 rep_ctrl = ShenandoahWriteBarrierNode::move_above_predicates(c, val_ctrl, phase); 1840 } else { 1841 while (rep_ctrl->is_IfProj()) { 1842 CallStaticJavaNode* unc = rep_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); 1843 if (unc != NULL) { 1844 int req = unc->uncommon_trap_request(); 1845 Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); 1846 if ((trap_reason == Deoptimization::Reason_loop_limit_check || 1847 trap_reason == Deoptimization::Reason_predicate || 1848 trap_reason == Deoptimization::Reason_profile_predicate) && 1849 phase->is_dominator(val_ctrl, rep_ctrl->in(0)->in(0))) { 1850 rep_ctrl = rep_ctrl->in(0)->in(0); 1851 continue; 1852 } 1853 } 1854 break; 1855 } 1856 } 
1857 } 1858 1859 Node* wb_ctrl = phase->get_ctrl(wb); 1860 Node* other_ctrl = phase->get_ctrl(other); 1861 int alias = phase->C->get_alias_index(wb->adr_type()); 1862 MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias);; 1863 if (!is_dominator(wb_ctrl, other_ctrl, wb, other, phase)) { 1864 if (fixer == NULL) { 1865 fixer = create_fixer(memory_graph_fixers, alias, phase, include_lsm); 1866 } 1867 Node* mem = fixer->find_mem(rep_ctrl, phase->get_ctrl(other) == rep_ctrl ? other : NULL); 1868 1869 if (mem->has_out_with(Op_Lock) || mem->has_out_with(Op_Unlock)) { 1870 continue; 1871 } 1872 1873 Node* wb_proj = wb->find_out_with(Op_ShenandoahWBMemProj); 1874 fixer->remove(wb_proj); 1875 Node* mem_for_ctrl = fixer->find_mem(rep_ctrl, NULL); 1876 1877 if (wb->in(Memory) != mem) { 1878 disconnect_barrier_mem(wb, phase->igvn()); 1879 phase->igvn().replace_input_of(wb, Memory, mem); 1880 } 1881 if (rep_ctrl != wb_ctrl) { 1882 phase->set_ctrl_and_loop(wb, rep_ctrl); 1883 phase->igvn().replace_input_of(wb, Control, rep_ctrl); 1884 phase->set_ctrl_and_loop(wb_proj, rep_ctrl); 1885 progress = true; 1886 } 1887 1888 fixer->fix_mem(rep_ctrl, rep_ctrl, mem, mem_for_ctrl, wb_proj, uses); 1889 1890 assert(!ShenandoahVerifyOptoBarriers || ShenandoahWriteBarrierNode::memory_dominates_all_paths(mem, rep_ctrl, alias, phase), "can't fix the memory graph"); 1891 } 1892 1893 if (other->Opcode() == Op_ShenandoahWriteBarrier) { 1894 Node* other_proj = other->find_out_with(Op_ShenandoahWBMemProj); 1895 if (fixer != NULL) { 1896 fixer->remove(other_proj); 1897 } 1898 phase->igvn().replace_node(other_proj, other->in(Memory)); 1899 } 1900 phase->igvn().replace_node(other, wb); 1901 --j; --jmax; 1902 } 1903 } 1904 } 1905 } 1906 } while(progress); 1907 } 1908 1909 void ShenandoahReadBarrierNode::try_move(Node *n_ctrl, PhaseIdealLoop* phase) { 1910 Node* mem = in(MemNode::Memory); 1911 int alias = phase->C->get_alias_index(adr_type()); 1912 const bool trace = false; 1913 1914 #ifdef 
ASSERT 1915 if (trace) { tty->print("Trying to move mem of"); dump(); } 1916 #endif 1917 1918 Node* new_mem = mem; 1919 1920 ResourceMark rm; 1921 VectorSet seen(Thread::current()->resource_area()); 1922 Node_List phis; 1923 1924 for (;;) { 1925 #ifdef ASSERT 1926 if (trace) { tty->print("Looking for dominator from"); mem->dump(); } 1927 #endif 1928 if (mem->is_Proj() && mem->in(0)->is_Start()) { 1929 if (new_mem != in(MemNode::Memory)) { 1930 #ifdef ASSERT 1931 if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); } 1932 #endif 1933 phase->igvn().replace_input_of(this, MemNode::Memory, new_mem); 1934 } 1935 return; 1936 } 1937 1938 Node* candidate = mem; 1939 do { 1940 if (!is_independent(mem)) { 1941 if (trace) { tty->print_cr("Not independent"); } 1942 if (new_mem != in(MemNode::Memory)) { 1943 #ifdef ASSERT 1944 if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); } 1945 #endif 1946 phase->igvn().replace_input_of(this, MemNode::Memory, new_mem); 1947 } 1948 return; 1949 } 1950 if (seen.test_set(mem->_idx)) { 1951 if (trace) { tty->print_cr("Already seen"); } 1952 ShouldNotReachHere(); 1953 // Strange graph 1954 if (new_mem != in(MemNode::Memory)) { 1955 #ifdef ASSERT 1956 if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); } 1957 #endif 1958 phase->igvn().replace_input_of(this, MemNode::Memory, new_mem); 1959 } 1960 return; 1961 } 1962 if (mem->is_Phi()) { 1963 phis.push(mem); 1964 } 1965 mem = next_mem(mem, alias); 1966 if (mem->bottom_type() == Type::MEMORY) { 1967 candidate = mem; 1968 } 1969 assert(is_dominator(phase->ctrl_or_self(mem), n_ctrl, mem, this, phase) == phase->is_dominator(phase->ctrl_or_self(mem), n_ctrl), "strange dominator"); 1970 #ifdef ASSERT 1971 if (trace) { tty->print("Next mem is"); mem->dump(); } 1972 #endif 1973 } while (mem->bottom_type() != Type::MEMORY || !phase->is_dominator(phase->ctrl_or_self(mem), n_ctrl)); 
1974 1975 assert(mem->bottom_type() == Type::MEMORY, "bad mem"); 1976 1977 bool not_dom = false; 1978 for (uint i = 0; i < phis.size() && !not_dom; i++) { 1979 Node* nn = phis.at(i); 1980 1981 #ifdef ASSERT 1982 if (trace) { tty->print("Looking from phi"); nn->dump(); } 1983 #endif 1984 assert(nn->is_Phi(), "phis only"); 1985 for (uint j = 2; j < nn->req() && !not_dom; j++) { 1986 Node* m = nn->in(j); 1987 #ifdef ASSERT 1988 if (trace) { tty->print("Input %d is", j); m->dump(); } 1989 #endif 1990 while (m != mem && !seen.test_set(m->_idx)) { 1991 if (is_dominator(phase->ctrl_or_self(m), phase->ctrl_or_self(mem), m, mem, phase)) { 1992 not_dom = true; 1993 // Scheduling anomaly 1994 #ifdef ASSERT 1995 if (trace) { tty->print("Giving up"); m->dump(); } 1996 #endif 1997 break; 1998 } 1999 if (!is_independent(m)) { 2000 if (trace) { tty->print_cr("Not independent"); } 2001 if (new_mem != in(MemNode::Memory)) { 2002 #ifdef ASSERT 2003 if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); } 2004 #endif 2005 phase->igvn().replace_input_of(this, MemNode::Memory, new_mem); 2006 } 2007 return; 2008 } 2009 if (m->is_Phi()) { 2010 phis.push(m); 2011 } 2012 m = next_mem(m, alias); 2013 #ifdef ASSERT 2014 if (trace) { tty->print("Next mem is"); m->dump(); } 2015 #endif 2016 } 2017 } 2018 } 2019 if (!not_dom) { 2020 new_mem = mem; 2021 phis.clear(); 2022 } else { 2023 seen.Clear(); 2024 } 2025 } 2026 } 2027 2028 CallStaticJavaNode* ShenandoahWriteBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) { 2029 Node* val = in(ValueIn); 2030 2031 const Type* val_t = igvn.type(val); 2032 2033 if (val_t->meet(TypePtr::NULL_PTR) != val_t && 2034 val->Opcode() == Op_CastPP && 2035 val->in(0) != NULL && 2036 val->in(0)->Opcode() == Op_IfTrue && 2037 val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && 2038 val->in(0)->in(0)->is_If() && 2039 val->in(0)->in(0)->in(1)->Opcode() == Op_Bool && 2040 
val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && 2041 val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && 2042 val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) && 2043 val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { 2044 assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), ""); 2045 CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); 2046 return unc; 2047 } 2048 return NULL; 2049 } 2050 2051 void ShenandoahWriteBarrierNode::pin_and_expand_move_barrier(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, Unique_Node_List& uses) { 2052 Node* unc = pin_and_expand_null_check(phase->igvn()); 2053 Node* val = in(ValueIn); 2054 2055 if (unc != NULL) { 2056 Node* ctrl = phase->get_ctrl(this); 2057 Node* unc_ctrl = val->in(0); 2058 2059 // Don't move write barrier in a loop 2060 IdealLoopTree* loop = phase->get_loop(ctrl); 2061 IdealLoopTree* unc_loop = phase->get_loop(unc_ctrl); 2062 2063 if (!unc_loop->is_member(loop)) { 2064 return; 2065 } 2066 2067 Node* branch = no_branches(ctrl, unc_ctrl, false, phase); 2068 assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch"); 2069 if (branch == NodeSentinel) { 2070 return; 2071 } 2072 2073 RegionNode* r = new RegionNode(3); 2074 IfNode* iff = unc_ctrl->in(0)->as_If(); 2075 2076 Node* ctrl_use = unc_ctrl->unique_ctrl_out(); 2077 Node* unc_ctrl_clone = unc_ctrl->clone(); 2078 phase->register_control(unc_ctrl_clone, loop, iff); 2079 Node* c = unc_ctrl_clone; 2080 Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase); 2081 r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0)); 2082 2083 phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0)); 2084 phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl)); 2085 phase->lazy_replace(c, unc_ctrl); 2086 c = NULL;; 2087 phase->igvn().replace_input_of(val, 0, unc_ctrl_clone); 2088 phase->set_ctrl(val, 
unc_ctrl_clone); 2089 2090 IfNode* new_iff = new_cast->in(0)->in(0)->as_If(); 2091 fix_null_check(unc, unc_ctrl_clone, r, uses, phase); 2092 Node* iff_proj = iff->proj_out(0); 2093 r->init_req(2, iff_proj); 2094 phase->register_control(r, phase->ltree_root(), iff); 2095 2096 Node* new_bol = new_iff->in(1)->clone(); 2097 Node* new_cmp = new_bol->in(1)->clone(); 2098 assert(new_cmp->Opcode() == Op_CmpP, "broken"); 2099 assert(new_cmp->in(1) == val->in(1), "broken"); 2100 new_bol->set_req(1, new_cmp); 2101 new_cmp->set_req(1, this); 2102 phase->register_new_node(new_bol, new_iff->in(0)); 2103 phase->register_new_node(new_cmp, new_iff->in(0)); 2104 phase->igvn().replace_input_of(new_iff, 1, new_bol); 2105 phase->igvn().replace_input_of(new_cast, 1, this); 2106 2107 for (DUIterator_Fast imax, i = this->fast_outs(imax); i < imax; i++) { 2108 Node* u = this->fast_out(i); 2109 if (u == new_cast || u->Opcode() == Op_ShenandoahWBMemProj || u == new_cmp) { 2110 continue; 2111 } 2112 phase->igvn().rehash_node_delayed(u); 2113 int nb = u->replace_edge(this, new_cast); 2114 assert(nb > 0, "no update?"); 2115 --i; imax -= nb; 2116 } 2117 2118 for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { 2119 Node* u = val->fast_out(i); 2120 if (u == this) { 2121 continue; 2122 } 2123 phase->igvn().rehash_node_delayed(u); 2124 int nb = u->replace_edge(val, new_cast); 2125 assert(nb > 0, "no update?"); 2126 --i; imax -= nb; 2127 } 2128 2129 Node* new_ctrl = unc_ctrl_clone; 2130 2131 int alias = phase->C->get_alias_index(adr_type()); 2132 MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias); 2133 if (fixer == NULL) { 2134 fixer = create_fixer(memory_graph_fixers, alias, phase, true); 2135 } 2136 2137 Node* proj = find_out_with(Op_ShenandoahWBMemProj); 2138 fixer->remove(proj); 2139 Node* mem = fixer->find_mem(new_ctrl, NULL); 2140 2141 if (in(Memory) != mem) { 2142 disconnect_barrier_mem(this, phase->igvn()); 2143 phase->igvn().replace_input_of(this, Memory, 
mem); 2144 } 2145 2146 phase->set_ctrl_and_loop(this, new_ctrl); 2147 phase->igvn().replace_input_of(this, Control, new_ctrl); 2148 phase->set_ctrl_and_loop(proj, new_ctrl); 2149 2150 fixer->fix_mem(new_ctrl, new_ctrl, mem, mem, proj, uses); 2151 } 2152 } 2153 2154 void ShenandoahWriteBarrierNode::pin_and_expand_helper(PhaseIdealLoop* phase) { 2155 Node* val = in(ValueIn); 2156 CallStaticJavaNode* unc = pin_and_expand_null_check(phase->igvn()); 2157 Node* rep = this; 2158 Node* ctrl = phase->get_ctrl(this); 2159 if (unc != NULL && val->in(0) == ctrl) { 2160 Node* unc_ctrl = val->in(0); 2161 IfNode* other_iff = unc_ctrl->unique_ctrl_out()->as_If(); 2162 ProjNode* other_unc_ctrl = other_iff->proj_out(1); 2163 Node* cast = NULL; 2164 for (DUIterator_Fast imax, i = other_unc_ctrl->fast_outs(imax); i < imax && cast == NULL; i++) { 2165 Node* u = other_unc_ctrl->fast_out(i); 2166 if (u->Opcode() == Op_CastPP && u->in(1) == this) { 2167 cast = u; 2168 } 2169 } 2170 assert(other_unc_ctrl->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) == unc, "broken"); 2171 rep = cast; 2172 } 2173 2174 // Replace all uses of barrier's input that are dominated by ctrl 2175 // with the value returned by the barrier: no need to keep both 2176 // live. 
2177 for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { 2178 Node* u = val->fast_out(i); 2179 if (u != this) { 2180 if (u->is_Phi()) { 2181 int nb = 0; 2182 for (uint j = 1; j < u->req(); j++) { 2183 if (u->in(j) == val) { 2184 Node* c = u->in(0)->in(j); 2185 if (phase->is_dominator(ctrl, c)) { 2186 phase->igvn().replace_input_of(u, j, rep); 2187 nb++; 2188 } 2189 } 2190 } 2191 if (nb > 0) { 2192 imax -= nb; 2193 --i; 2194 } 2195 } else { 2196 Node* c = phase->ctrl_or_self(u); 2197 if (is_dominator(ctrl, c, this, u, phase)) { 2198 phase->igvn().rehash_node_delayed(u); 2199 int nb = u->replace_edge(val, rep); 2200 assert(nb > 0, "no update?"); 2201 --i, imax -= nb; 2202 } 2203 } 2204 } 2205 } 2206 } 2207 2208 Node* ShenandoahWriteBarrierNode::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) { 2209 Node* mem = NULL; 2210 Node* c = ctrl; 2211 do { 2212 if (c->is_Region()) { 2213 Node* phi_bottom = NULL; 2214 for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) { 2215 Node* u = c->fast_out(i); 2216 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) { 2217 if (u->adr_type() == TypePtr::BOTTOM) { 2218 mem = u; 2219 } 2220 } 2221 } 2222 } else { 2223 if (c->is_Call() && c->as_Call()->adr_type() != NULL) { 2224 CallProjections projs; 2225 c->as_Call()->extract_projections(&projs, true, false); 2226 if (projs.fallthrough_memproj != NULL) { 2227 if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) { 2228 if (projs.catchall_memproj == NULL) { 2229 mem = projs.fallthrough_memproj; 2230 } else { 2231 if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) { 2232 mem = projs.fallthrough_memproj; 2233 } else { 2234 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier"); 2235 mem = projs.catchall_memproj; 2236 } 2237 } 2238 } 2239 } else { 2240 Node* proj = c->as_Call()->proj_out(TypeFunc::Memory); 2241 if (proj != NULL && 2242 proj->adr_type() == TypePtr::BOTTOM) { 2243 mem = 
proj;
          }
        }
      } else {
        // Neither a region nor a call with an adr_type: expect at most one
        // raw bottom memory projection (safepoint, membar or start).
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
            assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
            assert(mem == NULL, "only one proj");
            mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
      }
    }
    // Keep climbing the dominator tree until a bottom memory state is found.
    c = phase->idom(c);
  } while (mem == NULL);
  return mem;
}

// Pushes onto 'uses' the non-CFG users of 'n' that are pinned at 'ctrl',
// skipping loop-phi uses of 'n' that come in through the back edge.
void ShenandoahWriteBarrierNode::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

// Replaces the outer strip-mined loop head and its loop end with a plain
// LoopNode/IfNode so the loop nest no longer appears strip-mined. Used when
// expanding a barrier inside the nest would break strip mining verification.
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

// Emits a test of the thread-local gc-state byte against HAS_FORWARDED.
// On return, 'ctrl' is the (unlikely) path where forwarded objects may
// exist (barrier work needed) and 'heap_stable_ctrl' the stable fast path.
void ShenandoahWriteBarrierNode::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
                                                  PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  Node* thread = new ThreadLocalNode();
  phase->register_new_node(thread, ctrl);
  Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  phase->set_ctrl(offset, phase->C->root());
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
  phase->register_new_node(gc_state_addr, ctrl);
  uint gc_state_idx = Compile::AliasIdxRaw;
  const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
  debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

  Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(gc_state, ctrl);
  Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
  phase->register_new_node(heap_stable_and, ctrl);
  Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
  phase->register_new_node(heap_stable_cmp, ctrl);
  Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
  phase->register_new_node(heap_stable_test, ctrl);
  IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(heap_stable_iff, loop, ctrl);

  heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
  phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
  ctrl = new IfTrueNode(heap_stable_iff);
  phase->register_control(ctrl, loop, heap_stable_iff);

  assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
}

// If the type system cannot prove 'val' non-null, emits an explicit null
// check. On return 'ctrl' is the non-null path and 'null_ctrl' the null
// path; both are left untouched when 'val' is provably non-null.
void ShenandoahWriteBarrierNode::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  const Type* val_t = phase->igvn().type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
    phase->register_new_node(null_cmp, ctrl);
    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
    phase->register_new_node(null_test, ctrl);
    IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    phase->register_control(null_iff, loop, ctrl);
    ctrl = new IfTrueNode(null_iff);
    phase->register_control(ctrl, loop, null_iff);
    null_ctrl = new IfFalseNode(null_iff);
    phase->register_control(null_ctrl, loop, null_iff);
  }
}

// Clones the null-check If that guards 'unc_ctrl' at control 'c' and
// returns a fresh CastPP of the checked value pinned on the cloned check's
// true projection; 'c' is updated to that projection.
Node* ShenandoahWriteBarrierNode::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(c);
  Node* iff = unc_ctrl->in(0);
  assert(iff->is_If(), "broken");
  Node* new_iff = iff->clone();
  new_iff->set_req(0, c);
  phase->register_control(new_iff, loop, c);
  Node* iffalse = new IfFalseNode(new_iff->as_If());
  phase->register_control(iffalse, loop, new_iff);
  Node* iftrue = new IfTrueNode(new_iff->as_If());
  phase->register_control(iftrue, loop, new_iff);
  c = iftrue;
  const Type *t = phase->igvn().type(val);
  assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
  Node* uncasted_val = val->in(1);
  val = new CastPPNode(uncasted_val, t);
  val->init_req(0, c);
  phase->register_new_node(val, c);
  return val;
}

// After the null check guarding 'unc' has been cloned, moves everything
// that was pinned on the old not-taken projection down to 'new_unc_ctrl'.
void ShenandoahWriteBarrierNode::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
                                                Unique_Node_List& uses, PhaseIdealLoop* phase) {
  IfNode* iff = unc_ctrl->in(0)->as_If();
  Node* proj = iff->proj_out(0);
  assert(proj != unc_ctrl, "bad projection");
  Node* use = proj->unique_ctrl_out();

  assert(use == unc || use->is_Region(), "what else?");

  uses.clear();
  if (use == unc) {
    // The projection feeds the uncommon trap call directly: collect the
    // call's inputs that are pinned on the projection.
    phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
    for (uint i = 1; i < unc->req(); i++) {
      Node* n = unc->in(i);
      if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
        uses.push(n);
      }
    }
  } else {
    // The projection feeds a region: collect the phi inputs coming in on
    // that path.
    assert(use->is_Region(), "what else?");
    uint idx = 1;
    for (; use->in(idx) != proj; idx++);
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* u = use->fast_out(i);
      if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
        uses.push(u->in(idx));
      }
    }
  }
  // Transitively move everything pinned at 'proj' down to 'new_unc_ctrl'.
  for(uint next = 0; next < uses.size(); next++ ) {
    Node *n = uses.at(next);
    assert(phase->get_ctrl(n) == proj, "bad control");
    phase->set_ctrl_and_loop(n, new_unc_ctrl);
    if (n->in(0) == proj) {
      phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
    }
    for (uint i = 0; i < n->req(); i++) {
      Node* m = n->in(i);
      if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
        uses.push(m);
      }
    }
  }

  phase->igvn().rehash_node_delayed(use);
  int nb = use->replace_edge(proj, new_unc_ctrl);
  assert(nb == 1, "only use expected");
}

// Emits the collection-set membership test: shifts the object address by
// the region size shift to index the global cset table and loads one byte.
// On return 'not_cset_ctrl' is the likely path (object not in collection
// set) and 'ctrl' the path where it is.
void ShenandoahWriteBarrierNode::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(ctrl);
  Node* raw_rbtrue = new CastP2XNode(ctrl, val);
  phase->register_new_node(raw_rbtrue, ctrl);
  Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
  phase->register_new_node(cset_offset, ctrl);
  Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
  Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
  phase->register_new_node(in_cset_fast_test_adr, ctrl);
  uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
  const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
  debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
  Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(in_cset_fast_test_load, ctrl);
  Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
  phase->register_new_node(in_cset_fast_test_cmp, ctrl);
  Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
  phase->register_new_node(in_cset_fast_test_test, ctrl);
  IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(in_cset_fast_test_iff, loop, ctrl);

  not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
  phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);

  ctrl = new IfFalseNode(in_cset_fast_test_iff);
  phase->register_control(ctrl, loop, in_cset_fast_test_iff);
}

// Emits the slow-path leaf call to the write barrier runtime stub. On
// return 'ctrl' is the call's control projection, 'result_mem' its memory
// projection and 'val' the call's result, checkcasted back to the object
// type.
void ShenandoahWriteBarrierNode::call_wb_stub(Node*& ctrl, Node*& val, Node*& result_mem,
                                              Node* raw_mem, Node* wb_mem,
                                              int alias,
                                              PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();

  // The slow path stub consumes and produces raw memory in addition
  // to the existing memory edges
  Node* base = find_bottom_mem(ctrl, phase);

  MergeMemNode* mm = MergeMemNode::make(base);
  mm->set_memory_at(alias, wb_mem);
  mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
  phase->register_new_node(mm, ctrl);

  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_write_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), "shenandoah_write_barrier", TypeRawPtr::BOTTOM);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, mm);
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  result_mem = new ProjNode(call, TypeFunc::Memory);
  phase->register_new_node(result_mem, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

// Moves the nodes that were control dependent on the barrier's input
// control down to 'region' (the merge point of the expanded barrier),
// collecting rewired non-CFG nodes in 'uses'. Nodes feeding the incoming
// raw memory state are kept above the barrier.
void ShenandoahWriteBarrierNode::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  // Transitive closure of the raw memory state's inputs at this control.
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    // Only rewire pre-existing nodes (u->_idx < last), never the barrier
    // itself, the raw memory chain, a region/phi pinned by its control
    // input, or a CreateEx tied to a CatchProj.
    if (u->_idx < last &&
        u != barrier &&
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region);
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        --i, imax -= nb;
      }
    }
  }
}

// Pins all write barriers at their final control and expands them (and the
// SATB enqueue barriers) into explicit heap-stable / cset tests plus slow
// path runtime calls.
void ShenandoahWriteBarrierNode::pin_and_expand(PhaseIdealLoop* phase) {
  Node_List enqueue_barriers;
  if (ShenandoahStoreValEnqueueBarrier) {
    // Collect all ShenandoahEnqueueBarrier nodes reachable from the root.
    Unique_Node_List wq;
    wq.push(phase->C->root());
    for (uint i = 0; i < wq.size(); i++) {
      Node* n = wq.at(i);
      if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
        enqueue_barriers.push(n);
      }
      // NOTE(review): the inner loop index shadows the worklist index 'i';
      // benign here since the outer index is not used past this point in
      // the iteration.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != NULL) {
          wq.push(in);
        }
      }
    }
} 2538 2539 const bool trace = false; 2540 2541 // Collect raw memory state at CFG points in the entire graph and 2542 // record it in memory_nodes. Optimize the raw memory graph in the 2543 // process. Optimizing the memory graph also makes the memory graph 2544 // simpler. 2545 GrowableArray<MemoryGraphFixer*> memory_graph_fixers; 2546 2547 // Let's try to common write barriers again 2548 optimize_before_expansion(phase, memory_graph_fixers, true); 2549 2550 Unique_Node_List uses; 2551 for (int i = 0; i < ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i++) { 2552 ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i); 2553 Node* ctrl = phase->get_ctrl(wb); 2554 2555 Node* val = wb->in(ValueIn); 2556 if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) { 2557 assert(is_dominator(phase->get_ctrl(val), ctrl->in(0)->in(0), val, ctrl->in(0), phase), "can't move"); 2558 phase->set_ctrl(wb, ctrl->in(0)->in(0)); 2559 } else if (ctrl->is_CallRuntime()) { 2560 assert(is_dominator(phase->get_ctrl(val), ctrl->in(0), val, ctrl, phase), "can't move"); 2561 phase->set_ctrl(wb, ctrl->in(0)); 2562 } 2563 2564 assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "only for write barriers"); 2565 // Look for a null check that dominates this barrier and move the 2566 // barrier right after the null check to enable implicit null 2567 // checks 2568 wb->pin_and_expand_move_barrier(phase, memory_graph_fixers, uses); 2569 2570 wb->pin_and_expand_helper(phase); 2571 } 2572 2573 for (uint i = 0; i < enqueue_barriers.size(); i++) { 2574 Node* barrier = enqueue_barriers.at(i); 2575 Node* ctrl = phase->get_ctrl(barrier); 2576 IdealLoopTree* loop = phase->get_loop(ctrl); 2577 if (loop->_head->is_OuterStripMinedLoop()) { 2578 // Expanding a barrier here will break loop strip mining 2579 // verification. Transform the loop so the loop nest doesn't 2580 // appear as strip mined. 
2581 OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop(); 2582 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase); 2583 } 2584 } 2585 2586 for (int i = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i > 0; i--) { 2587 int cnt = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); 2588 ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i-1); 2589 Node* ctrl = phase->get_ctrl(wb); 2590 IdealLoopTree* loop = phase->get_loop(ctrl); 2591 if (loop->_head->is_OuterStripMinedLoop()) { 2592 // Expanding a barrier here will break loop strip mining 2593 // verification. Transform the loop so the loop nest doesn't 2594 // appear as strip mined. 2595 OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop(); 2596 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase); 2597 } 2598 } 2599 2600 MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase); 2601 Unique_Node_List uses_to_ignore; 2602 for (uint i = 0; i < enqueue_barriers.size(); i++) { 2603 Node* barrier = enqueue_barriers.at(i); 2604 Node* pre_val = barrier->in(1); 2605 2606 if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) { 2607 ShouldNotReachHere(); 2608 continue; 2609 } 2610 2611 Node* ctrl = phase->get_ctrl(barrier); 2612 2613 if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) { 2614 assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move"); 2615 ctrl = ctrl->in(0)->in(0); 2616 phase->set_ctrl(barrier, ctrl); 2617 } else if (ctrl->is_CallRuntime()) { 2618 assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move"); 2619 ctrl = ctrl->in(0); 2620 phase->set_ctrl(barrier, ctrl); 2621 } 2622 2623 Node* init_ctrl = ctrl; 2624 IdealLoopTree* loop = phase->get_loop(ctrl); 2625 Node* raw_mem = fixer.find_mem(ctrl, barrier); 2626 Node* init_raw_mem = 
raw_mem; 2627 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL); 2628 Node* heap_stable_ctrl = NULL; 2629 Node* null_ctrl = NULL; 2630 uint last = phase->C->unique(); 2631 2632 enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT }; 2633 Node* region = new RegionNode(PATH_LIMIT); 2634 Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); 2635 2636 enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 }; 2637 Node* region2 = new RegionNode(PATH_LIMIT2); 2638 Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); 2639 2640 // Stable path. 2641 test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase); 2642 region->init_req(_heap_stable, heap_stable_ctrl); 2643 phi->init_req(_heap_stable, raw_mem); 2644 2645 // Null path 2646 Node* reg2_ctrl = NULL; 2647 test_null(ctrl, pre_val, null_ctrl, phase); 2648 if (null_ctrl != NULL) { 2649 reg2_ctrl = null_ctrl->in(0); 2650 region2->init_req(_null_path, null_ctrl); 2651 phi2->init_req(_null_path, raw_mem); 2652 } else { 2653 region2->del_req(_null_path); 2654 phi2->del_req(_null_path); 2655 } 2656 2657 const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()); 2658 const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()); 2659 Node* thread = new ThreadLocalNode(); 2660 phase->register_new_node(thread, ctrl); 2661 Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset)); 2662 phase->register_new_node(buffer_adr, ctrl); 2663 Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset)); 2664 phase->register_new_node(index_adr, ctrl); 2665 2666 BasicType index_bt = TypeX_X->basic_type(); 2667 assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size."); 2668 const TypePtr* adr_type = TypeRawPtr::BOTTOM; 2669 Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, 
MemNode::unordered); 2670 phase->register_new_node(index, ctrl); 2671 Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0)); 2672 phase->register_new_node(index_cmp, ctrl); 2673 Node* index_test = new BoolNode(index_cmp, BoolTest::ne); 2674 phase->register_new_node(index_test, ctrl); 2675 IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN); 2676 if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff; 2677 phase->register_control(queue_full_iff, loop, ctrl); 2678 Node* not_full = new IfTrueNode(queue_full_iff); 2679 phase->register_control(not_full, loop, queue_full_iff); 2680 Node* full = new IfFalseNode(queue_full_iff); 2681 phase->register_control(full, loop, queue_full_iff); 2682 2683 ctrl = not_full; 2684 2685 Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t))); 2686 phase->register_new_node(next_index, ctrl); 2687 2688 Node* buffer = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered); 2689 phase->register_new_node(buffer, ctrl); 2690 Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index); 2691 phase->register_new_node(log_addr, ctrl); 2692 Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered); 2693 phase->register_new_node(log_store, ctrl); 2694 // update the index 2695 Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered); 2696 phase->register_new_node(index_update, ctrl); 2697 2698 // Fast-path case 2699 region2->init_req(_fast_path, ctrl); 2700 phi2->init_req(_fast_path, index_update); 2701 2702 ctrl = full; 2703 2704 Node* base = find_bottom_mem(ctrl, phase); 2705 2706 MergeMemNode* mm = MergeMemNode::make(base); 2707 mm->set_memory_at(Compile::AliasIdxRaw, raw_mem); 2708 phase->register_new_node(mm, ctrl); 2709 2710 Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, 
ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM); 2711 call->init_req(TypeFunc::Control, ctrl); 2712 call->init_req(TypeFunc::I_O, phase->C->top()); 2713 call->init_req(TypeFunc::Memory, mm); 2714 call->init_req(TypeFunc::FramePtr, phase->C->top()); 2715 call->init_req(TypeFunc::ReturnAdr, phase->C->top()); 2716 call->init_req(TypeFunc::Parms, pre_val); 2717 call->init_req(TypeFunc::Parms+1, thread); 2718 phase->register_control(call, loop, ctrl); 2719 2720 Node* ctrl_proj = new ProjNode(call, TypeFunc::Control); 2721 phase->register_control(ctrl_proj, loop, call); 2722 Node* mem_proj = new ProjNode(call, TypeFunc::Memory); 2723 phase->register_new_node(mem_proj, call); 2724 2725 // Slow-path case 2726 region2->init_req(_slow_path, ctrl_proj); 2727 phi2->init_req(_slow_path, mem_proj); 2728 2729 phase->register_control(region2, loop, reg2_ctrl); 2730 phase->register_new_node(phi2, region2); 2731 2732 region->init_req(_heap_unstable, region2); 2733 phi->init_req(_heap_unstable, phi2); 2734 2735 phase->register_control(region, loop, heap_stable_ctrl->in(0)); 2736 phase->register_new_node(phi, region); 2737 2738 fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase); 2739 for(uint next = 0; next < uses.size(); next++ ) { 2740 Node *n = uses.at(next); 2741 assert(phase->get_ctrl(n) == init_ctrl, "bad control"); 2742 assert(n != init_raw_mem, "should leave input raw mem above the barrier"); 2743 phase->set_ctrl(n, region); 2744 follow_barrier_uses(n, init_ctrl, uses, phase); 2745 } 2746 fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses); 2747 2748 phase->igvn().replace_node(barrier, pre_val); 2749 } 2750 2751 for (int i = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i > 0; i--) { 2752 int cnt = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); 2753 ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i-1); 2754 
2755 uint last = phase->C->unique(); 2756 Node* ctrl = phase->get_ctrl(wb); 2757 Node* orig_ctrl = ctrl; 2758 2759 Node* raw_mem = fixer.find_mem(ctrl, wb); 2760 Node* init_raw_mem = raw_mem; 2761 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL); 2762 int alias = phase->C->get_alias_index(wb->adr_type()); 2763 Node* wb_mem = wb->in(Memory); 2764 Node* init_wb_mem = wb_mem; 2765 2766 Node* val = wb->in(ValueIn); 2767 Node* wbproj = wb->find_out_with(Op_ShenandoahWBMemProj); 2768 IdealLoopTree *loop = phase->get_loop(ctrl); 2769 2770 assert(val->Opcode() != Op_ShenandoahWriteBarrier, "No chain of write barriers"); 2771 2772 CallStaticJavaNode* unc = wb->pin_and_expand_null_check(phase->igvn()); 2773 Node* unc_ctrl = NULL; 2774 if (unc != NULL) { 2775 if (val->in(0) != ctrl) { 2776 unc = NULL; 2777 } else { 2778 unc_ctrl = val->in(0); 2779 } 2780 } 2781 2782 Node* uncasted_val = val; 2783 if (unc != NULL) { 2784 uncasted_val = val->in(1); 2785 } 2786 2787 Node* heap_stable_ctrl = NULL; 2788 Node* null_ctrl = NULL; 2789 2790 assert(val->bottom_type()->make_oopptr(), "need oop"); 2791 assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant"); 2792 2793 enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT }; 2794 Node* region = new RegionNode(PATH_LIMIT); 2795 Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr()); 2796 Node* mem_phi = PhiNode::make(region, wb_mem, Type::MEMORY, phase->C->alias_type(wb->adr_type())->adr_type()); 2797 Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); 2798 2799 enum { _not_cset = 1, _not_equal, _evac_path, _null_path, PATH_LIMIT2 }; 2800 Node* region2 = new RegionNode(PATH_LIMIT2); 2801 Node* val_phi2 = new PhiNode(region2, uncasted_val->bottom_type()->is_oopptr()); 2802 Node* mem_phi2 = PhiNode::make(region2, wb_mem, Type::MEMORY, phase->C->alias_type(wb->adr_type())->adr_type()); 2803 Node* raw_mem_phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, 
TypeRawPtr::BOTTOM); 2804 2805 // Stable path. 2806 test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase); 2807 IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If(); 2808 2809 // Heap stable case 2810 region->init_req(_heap_stable, heap_stable_ctrl); 2811 val_phi->init_req(_heap_stable, uncasted_val); 2812 mem_phi->init_req(_heap_stable, wb_mem); 2813 raw_mem_phi->init_req(_heap_stable, raw_mem); 2814 2815 Node* reg2_ctrl = NULL; 2816 // Null case 2817 test_null(ctrl, val, null_ctrl, phase); 2818 if (null_ctrl != NULL) { 2819 reg2_ctrl = null_ctrl->in(0); 2820 region2->init_req(_null_path, null_ctrl); 2821 val_phi2->init_req(_null_path, uncasted_val); 2822 mem_phi2->init_req(_null_path, wb_mem); 2823 raw_mem_phi2->init_req(_null_path, raw_mem); 2824 } else { 2825 region2->del_req(_null_path); 2826 val_phi2->del_req(_null_path); 2827 mem_phi2->del_req(_null_path); 2828 raw_mem_phi2->del_req(_null_path); 2829 } 2830 2831 // Test for in-cset. 2832 // Wires !in_cset(obj) to slot 2 of region and phis 2833 Node* not_cset_ctrl = NULL; 2834 in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase); 2835 if (not_cset_ctrl != NULL) { 2836 if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0); 2837 region2->init_req(_not_cset, not_cset_ctrl); 2838 val_phi2->init_req(_not_cset, uncasted_val); 2839 mem_phi2->init_req(_not_cset, wb_mem); 2840 raw_mem_phi2->init_req(_not_cset, raw_mem); 2841 } 2842 2843 // Resolve object when orig-value is in cset. 2844 // Make the unconditional resolve for fwdptr, not the read barrier. 
2845 Node* new_val = uncasted_val; 2846 if (unc_ctrl != NULL) { 2847 // Clone the null check in this branch to allow implicit null check 2848 new_val = clone_null_check(ctrl, val, unc_ctrl, phase); 2849 fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase); 2850 2851 IfNode* iff = unc_ctrl->in(0)->as_If(); 2852 phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1)); 2853 } 2854 Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(ShenandoahBrooksPointer::byte_offset())); 2855 phase->register_new_node(addr, ctrl); 2856 assert(val->bottom_type()->isa_oopptr(), "what else?"); 2857 const TypePtr* obj_type = val->bottom_type()->is_oopptr(); 2858 const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type); 2859 Node* fwd = new LoadPNode(ctrl, wb_mem, addr, adr_type, obj_type, MemNode::unordered); 2860 phase->register_new_node(fwd, ctrl); 2861 2862 // Only branch to WB stub if object is not forwarded; otherwise reply with fwd ptr 2863 Node* cmp = new CmpPNode(fwd, new_val); 2864 phase->register_new_node(cmp, ctrl); 2865 Node* bol = new BoolNode(cmp, BoolTest::eq); 2866 phase->register_new_node(bol, ctrl); 2867 2868 IfNode* iff = new IfNode(ctrl, bol, PROB_UNLIKELY(0.999), COUNT_UNKNOWN); 2869 if (reg2_ctrl == NULL) reg2_ctrl = iff; 2870 phase->register_control(iff, loop, ctrl); 2871 Node* if_not_eq = new IfFalseNode(iff); 2872 phase->register_control(if_not_eq, loop, iff); 2873 Node* if_eq = new IfTrueNode(iff); 2874 phase->register_control(if_eq, loop, iff); 2875 2876 // Wire up not-equal-path in slots 3. 
2877 region2->init_req(_not_equal, if_not_eq); 2878 val_phi2->init_req(_not_equal, fwd); 2879 mem_phi2->init_req(_not_equal, wb_mem); 2880 raw_mem_phi2->init_req(_not_equal, raw_mem); 2881 2882 // Call wb-stub and wire up that path in slots 4 2883 Node* result_mem = NULL; 2884 ctrl = if_eq; 2885 call_wb_stub(ctrl, new_val, result_mem, 2886 raw_mem, wb_mem, 2887 alias, phase); 2888 region2->init_req(_evac_path, ctrl); 2889 val_phi2->init_req(_evac_path, new_val); 2890 mem_phi2->init_req(_evac_path, result_mem); 2891 raw_mem_phi2->init_req(_evac_path, result_mem); 2892 2893 phase->register_control(region2, loop, reg2_ctrl); 2894 phase->register_new_node(val_phi2, region2); 2895 phase->register_new_node(mem_phi2, region2); 2896 phase->register_new_node(raw_mem_phi2, region2); 2897 2898 region->init_req(_heap_unstable, region2); 2899 val_phi->init_req(_heap_unstable, val_phi2); 2900 mem_phi->init_req(_heap_unstable, mem_phi2); 2901 raw_mem_phi->init_req(_heap_unstable, raw_mem_phi2); 2902 2903 phase->register_control(region, loop, heap_stable_iff); 2904 Node* out_val = val_phi; 2905 phase->register_new_node(val_phi, region); 2906 phase->register_new_node(mem_phi, region); 2907 phase->register_new_node(raw_mem_phi, region); 2908 2909 fix_ctrl(wb, region, fixer, uses, uses_to_ignore, last, phase); 2910 2911 ctrl = orig_ctrl; 2912 2913 phase->igvn().replace_input_of(wbproj, ShenandoahWBMemProjNode::WriteBarrier, phase->C->top()); 2914 phase->igvn().replace_node(wbproj, mem_phi); 2915 if (unc != NULL) { 2916 for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { 2917 Node* u = val->fast_out(i); 2918 Node* c = phase->ctrl_or_self(u); 2919 if (u != wb && (c != ctrl || is_dominator_same_ctrl(c, wb, u, phase))) { 2920 phase->igvn().rehash_node_delayed(u); 2921 int nb = u->replace_edge(val, out_val); 2922 --i, imax -= nb; 2923 } 2924 } 2925 if (val->outcnt() == 0) { 2926 phase->igvn()._worklist.push(val); 2927 } 2928 } 2929 phase->igvn().replace_node(wb, 
out_val); 2930 2931 follow_barrier_uses(mem_phi, ctrl, uses, phase); 2932 follow_barrier_uses(out_val, ctrl, uses, phase); 2933 2934 for(uint next = 0; next < uses.size(); next++ ) { 2935 Node *n = uses.at(next); 2936 assert(phase->get_ctrl(n) == ctrl, "bad control"); 2937 assert(n != init_raw_mem, "should leave input raw mem above the barrier"); 2938 phase->set_ctrl(n, region); 2939 follow_barrier_uses(n, ctrl, uses, phase); 2940 } 2941 2942 // The slow path call produces memory: hook the raw memory phi 2943 // from the expanded write barrier with the rest of the graph 2944 // which may require adding memory phis at every post dominated 2945 // region and at enclosing loop heads. Use the memory state 2946 // collected in memory_nodes to fix the memory graph. Update that 2947 // memory state as we go. 2948 fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses); 2949 assert(ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() == cnt - 1, "not replaced"); 2950 } 2951 2952 assert(ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() == 0, "all write barrier nodes should have been replaced"); 2953 } 2954 2955 void ShenandoahWriteBarrierNode::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) { 2956 IdealLoopTree *loop = phase->get_loop(iff); 2957 Node* loop_head = loop->_head; 2958 Node* entry_c = loop_head->in(LoopNode::EntryControl); 2959 2960 Node* bol = iff->in(1); 2961 Node* cmp = bol->in(1); 2962 Node* andi = cmp->in(1); 2963 Node* load = andi->in(1); 2964 2965 assert(is_gc_state_load(load), "broken"); 2966 if (!phase->is_dominator(load->in(0), entry_c)) { 2967 Node* mem_ctrl = NULL; 2968 Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase); 2969 load = load->clone(); 2970 load->set_req(MemNode::Memory, mem); 2971 load->set_req(0, entry_c); 2972 phase->register_new_node(load, entry_c); 2973 andi = andi->clone(); 2974 andi->set_req(1, load); 2975 
phase->register_new_node(andi, entry_c);
  // Rebuild the cmp/bool chain on the cloned gc-state load so the whole
  // heap-stable test hangs off the loop entry control.
  cmp = cmp->clone();
  cmp->set_req(1, andi);
  phase->register_new_node(cmp, entry_c);
  bol = bol->clone();
  bol->set_req(1, cmp);
  phase->register_new_node(bol, entry_c);

  Node* old_bol = iff->in(1);  // NOTE(review): unused local — looks like a leftover; candidate for removal
  phase->igvn().replace_input_of(iff, 1, bol);
  }
}

// Returns true when 'n' is a heap-stable test If whose enclosing Region is
// dominated by another heap-stable test, and every Region input is reached
// through one of the dominating If's two projections. Such a pair of
// back-to-back identical tests is a candidate for merging.
bool ShenandoahWriteBarrierNode::identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase) {
  if (!n->is_If() || n->is_CountedLoopEnd()) {
    return false;
  }
  Node* region = n->in(0);

  if (!region->is_Region()) {
    return false;
  }
  Node* dom = phase->idom(region);
  if (!dom->is_If()) {
    return false;
  }

  // Both this If and its region's dominator must be heap-stable tests.
  if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
    return false;
  }

  IfNode* dom_if = dom->as_If();
  Node* proj_true = dom_if->proj_out(1);
  Node* proj_false = dom_if->proj_out(0);

  // Every path into the region must come from one of the dominating If's
  // projections; otherwise the outcome of the dominating test doesn't
  // determine the outcome of 'n' on all paths.
  for (uint i = 1; i < region->req(); i++) {
    if (phase->is_dominator(proj_true, region->in(i))) {
      continue;
    }
    if (phase->is_dominator(proj_false, region->in(i))) {
      continue;
    }
    return false;
  }

  return true;
}

// Merges 'n' (a heap-stable test) with the identical dominating test found by
// identical_backtoback_ifs(): commons the gc-state load with the dominating
// one, replaces n's bool input with a Phi of int constants selected by which
// projection of the dominating If each region path comes from, then lets
// split-if eliminate the second test.
void ShenandoahWriteBarrierNode::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
  assert(is_heap_stable_test(n), "no other tests");
  if (identical_backtoback_ifs(n, phase)) {
    Node* n_ctrl = n->in(0);
    if (phase->can_split_if(n_ctrl)) {
      IfNode* dom_if = phase->idom(n_ctrl)->as_If();
      if (is_heap_stable_test(n)) {
        // Walk Bool -> Cmp -> And -> load on both tests and common the loads
        // so the two tests compare the same value.
        Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(gc_state_load), "broken");
        Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(dom_gc_state_load), "broken");
        if (gc_state_load != dom_gc_state_load) {
          phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
        }
      }
      PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
      Node*
proj_true = dom_if->proj_out(1);
      Node* proj_false = dom_if->proj_out(0);
      Node* con_true = phase->igvn().makecon(TypeInt::ONE);
      Node* con_false = phase->igvn().makecon(TypeInt::ZERO);

      // The phi mirrors the dominating test's outcome on each incoming path:
      // paths under the true projection get 1, paths under the false
      // projection get 0.
      for (uint i = 1; i < n_ctrl->req(); i++) {
        if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
          bolphi->init_req(i, con_true);
        } else {
          assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
          bolphi->init_req(i, con_false);
        }
      }
      phase->register_new_node(bolphi, n_ctrl);
      phase->igvn().replace_input_of(n, 1, bolphi);
      // With a constant-per-path bool input, split-if can now eliminate the
      // duplicated test.
      phase->do_split_if(n);
    }
  }
}

// Post-expansion cleanup: walks the whole graph once collecting gc-state
// loads and heap-stable test Ifs, commons the loads (fixpoint over
// try_common_gc_state_load), merges back-to-back heap-stable tests, and —
// unless the compile already reports major progress — unswitches innermost
// loops on a heap-stable test (un-strip-mining a counted loop first, since
// unswitching a strip-mined nest directly would break its structure).
void ShenandoahWriteBarrierNode::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
  Node_List heap_stable_tests;
  Node_List gc_state_loads;

  // Iterative DFS over raw outputs from Start; nodes are classified on
  // post-visit (when popped).
  stack.push(phase->C->start(), 0);
  do {
    Node* n = stack.node();
    uint i = stack.index();

    if (i < n->outcnt()) {
      Node* u = n->raw_out(i);
      stack.set_index(i+1);
      if (!visited.test_set(u->_idx)) {
        stack.push(u, 0);
      }
    } else {
      stack.pop();
      if (ShenandoahCommonGCStateLoads && ShenandoahWriteBarrierNode::is_gc_state_load(n)) {
        gc_state_loads.push(n);
      }
      if (n->is_If() && ShenandoahWriteBarrierNode::is_heap_stable_test(n)) {
        heap_stable_tests.push(n);
      }
    }
  } while (stack.size() > 0);

  // Common identical gc-state loads until no further progress. A load can
  // become dead (outcnt == 0) as a result of an earlier commoning, so check.
  bool progress;
  do {
    progress = false;
    for (uint i = 0; i < gc_state_loads.size(); i++) {
      Node* n = gc_state_loads.at(i);
      if (n->outcnt() != 0) {
        progress |= ShenandoahWriteBarrierNode::try_common_gc_state_load(n, phase);
      }
    }
  } while (progress);

  for (uint i = 0; i < heap_stable_tests.size(); i++) {
    Node* n = heap_stable_tests.at(i);
    assert(is_heap_stable_test(n), "only evacuation test");
    merge_back_to_back_tests(n, phase);
  }

  if (!phase->C->major_progress()) {
    VectorSet seen(Thread::current()->resource_area());
    for (uint i = 0; i < heap_stable_tests.size(); i++) {
      Node* n = heap_stable_tests.at(i);
      IdealLoopTree* loop = phase->get_loop(n);
      // Only innermost, reducible loops other than the root are candidates.
      if (loop != phase->ltree_root() &&
          loop->_child == NULL &&
          !loop->_irreducible) {
        LoopNode* head = loop->_head->as_Loop();
        if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
            !seen.test_set(head->_idx) &&  // unswitch each loop head at most once
            loop->policy_unswitching(phase, true)) {
          IfNode* iff = phase->find_unswitching_candidate(loop, true);
          if (iff != NULL && is_heap_stable_test(iff)) {
            if (head->is_strip_mined()) {
              // Replace the outer strip-mined loop and its end with plain
              // LoopNode/IfNode so the nest no longer looks strip mined.
              head->verify_strip_mined(0);
              OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
              OuterStripMinedLoopEndNode* le = head->outer_loop_end();
              Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
              phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
              Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
              phase->register_control(new_le, phase->get_loop(le), le->in(0));
              phase->lazy_replace(outer, new_outer);
              phase->lazy_replace(le, new_le);
              head->clear_strip_mined();
            }
            phase->do_unswitching(loop, old_new, true);
          }
        }
      }
    }
  }
}

#ifdef ASSERT
// Debug-only check of the raw-memory graph around expanded write-barrier
// runtime calls: from each such call, floods the control and raw-memory
// users and asserts that every Region fully inside the flooded control set
// has a matching raw-memory Phi whose inputs agree with the control inputs.
void ShenandoahBarrierNode::verify_raw_mem(RootNode* root) {
  const bool trace = false;
  ResourceMark rm;
  Unique_Node_List nodes;
  Unique_Node_List controls;
  Unique_Node_List memories;

  nodes.push(root);
  for (uint next = 0; next < nodes.size(); next++) {
    Node *n = nodes.at(next);
    if (ShenandoahBarrierSetC2::is_shenandoah_wb_call(n)) {
      controls.push(n);
      if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
      for (uint next2 = 0; next2 < controls.size(); next2++) {
        Node *m = controls.at(next2);
        // NOTE(review): the trailing "|| 1" makes this condition always
        // true — the loop-entry filter is effectively disabled (debug code).
        if (!m->is_Loop() || controls.member(m->in(LoopNode::EntryControl)) || 1) {
          for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
            Node* u = m->fast_out(i);
            if (u->is_CFG() && !u->is_Root() &&
                !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
                !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
              if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
              controls.push(u);
            }
          }
        }
      }
      // Flood raw-memory users starting at the call's memory projection.
      memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
      for (uint next2 = 0; next2 < memories.size(); next2++) {
        Node *m = memories.at(next2);
        assert(m->bottom_type() == Type::MEMORY, "");
        // NOTE(review): "|| 1" again disables the loop-phi filter here.
        if (!m->is_Phi() || !m->in(0)->is_Loop() || controls.member(m->in(0)->in(LoopNode::EntryControl)) || 1) {
          for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
            Node* u = m->fast_out(i);
            if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
              if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
              memories.push(u);
            } else if (u->is_LoadStore()) {
              if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
              memories.push(u->find_out_with(Op_SCMemProj));
            } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
              if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
              memories.push(u);
            } else if (u->is_Phi()) {
              assert(u->bottom_type() == Type::MEMORY, "");
              if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
                assert(controls.member(u->in(0)), "");
                if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
                memories.push(u);
              }
            } else if (u->is_SafePoint() || u->is_MemBar()) {
              for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
                Node* uu = u->fast_out(j);
if (uu->bottom_type() == Type::MEMORY) {
                  if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
                  memories.push(uu);
                }
              }
            }
          }
        }
      }
      // Every Region whose inputs are all in the flooded control set must
      // have a raw-memory Phi whose input membership matches control input
      // membership — otherwise the raw memory graph is broken.
      for (uint next2 = 0; next2 < controls.size(); next2++) {
        Node *m = controls.at(next2);
        if (m->is_Region()) {
          bool all_in = true;
          for (uint i = 1; i < m->req(); i++) {
            if (!controls.member(m->in(i))) {
              all_in = false;
              break;
            }
          }
          if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
          bool found_phi = false;
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
            Node* u = m->fast_out(j);
            if (u->is_Phi() && memories.member(u)) {
              found_phi = true;
              for (uint i = 1; i < u->req() && found_phi; i++) {
                Node* k = u->in(i);
                if (memories.member(k) != controls.member(m->in(i))) {
                  found_phi = false;
                }
              }
            }
          }
          assert(found_phi || all_in, "");
        }
      }
      // Reset the flooded sets before verifying the next wb call.
      controls.clear();
      memories.clear();
    }
    for( uint i = 0; i < n->len(); ++i ) {
      Node *m = n->in(i);
      if (m != NULL) {
        nodes.push(m);
      }
    }
  }
}
#endif

// Type of the enqueue barrier's result: TOP for a dead/missing input, NULL
// stays NULL, and any other oop keeps its type with constness stripped
// (cast_to_nonconst) — the barrier's output is not treated as a known
// constant oop.
const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
  if (in(1) == NULL || in(1)->is_top()) {
    return Type::TOP;
  }
  const Type* t = in(1)->bottom_type();
  if (t == TypePtr::NULL_PTR) {
    return t;
  }
  return t->is_oopptr()->cast_to_nonconst();
}

// GVN type computation; mirrors bottom_type() but uses the phase's
// (possibly narrower) type of the input.
const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
  if (in(1) == NULL) {
    return Type::TOP;
  }
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) {
    return Type::TOP;
  }
  if (t == TypePtr::NULL_PTR) {
    return t;
  }
  return t->is_oopptr()->cast_to_nonconst();
}

// Classifies a value feeding an enqueue barrier: NotNeeded for values that
// provably don't require one (NULL, constant oops, fresh allocations, or an
// existing enqueue barrier), MaybeNeeded for Phi/CMove (depends on their
// inputs), Needed otherwise.
int ShenandoahEnqueueBarrierNode::needed(Node* n) {
  if (n == NULL ||
      n->is_Allocate() ||
      n->Opcode() ==
Op_ShenandoahEnqueueBarrier ||
      n->bottom_type() == TypePtr::NULL_PTR ||
      (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
    return NotNeeded;
  }
  // Phi/CMove: whether a barrier is needed depends on their inputs.
  if (n->is_Phi() ||
      n->is_CMove()) {
    return MaybeNeeded;
  }
  return Needed;
}

// Skips over value-preserving nodes (constraint casts, DecodeN/EncodeP,
// projections) to reach the node that actually determines whether an
// enqueue barrier is needed. Stops early at NULL and constant oops.
Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
  for (;;) {
    if (n == NULL) {
      return n;
    } else if (n->bottom_type() == TypePtr::NULL_PTR) {
      return n;
    } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
      return n;
    } else if (n->is_ConstraintCast() ||
               n->Opcode() == Op_DecodeN ||
               n->Opcode() == Op_EncodeP) {
      n = n->in(1);
    } else if (n->is_Proj()) {
      n = n->in(0);
    } else {
      return n;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

// Identity transform: elides this enqueue barrier (returns in(1)) when the
// value it guards provably never needs one. For Phi/CMove inputs this walks
// a worklist of their inputs; the barrier is kept as soon as any reachable
// input is classified Needed. Outside IGVN the decision is deferred by
// recording the node for IGVN.
Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
  PhaseIterGVN* igvn = phase->is_IterGVN();

  Node* n = next(in(1));

  int cont = needed(n);

  if (cont == NotNeeded) {
    return in(1);
  } else if (cont == MaybeNeeded) {
    if (igvn == NULL) {
      // Not in IGVN yet: can't safely walk the worklist here; retry later.
      phase->record_for_igvn(this);
      return this;
    } else {
      ResourceMark rm;
      Unique_Node_List wq;
      uint wq_i = 0;

      for (;;) {
        // Expand the current MaybeNeeded node (Phi or CMove) into its
        // data inputs; Unique_Node_List de-duplicates.
        if (n->is_Phi()) {
          for (uint i = 1; i < n->req(); i++) {
            Node* m = n->in(i);
            if (m != NULL) {
              wq.push(m);
            }
          }
        } else {
          assert(n->is_CMove(), "nothing else here");
          Node* m = n->in(CMoveNode::IfFalse);
          wq.push(m);
          m = n->in(CMoveNode::IfTrue);
          wq.push(m);
        }
        Node* orig_n = NULL;
        do {
          if (wq_i >= wq.size()) {
            // Worklist exhausted without finding a Needed input: the
            // barrier is redundant.
            return in(1);
          }
          n = wq.at(wq_i);
          wq_i++;
          orig_n = n;
          n = next(n);
          cont = needed(n);
          if (cont == Needed) {
            return this;
          }
        } while (cont != MaybeNeeded || (orig_n != n &&
wq.member(n))); 3348 } 3349 } 3350 } 3351 3352 return this; 3353 } 3354 3355 #ifdef ASSERT 3356 static bool has_never_branch(Node* root) { 3357 for (uint i = 1; i < root->req(); i++) { 3358 Node* in = root->in(i); 3359 if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) { 3360 return true; 3361 } 3362 } 3363 return false; 3364 } 3365 #endif 3366 3367 void MemoryGraphFixer::collect_memory_nodes() { 3368 Node_Stack stack(0); 3369 VectorSet visited(Thread::current()->resource_area()); 3370 Node_List regions; 3371 3372 // Walk the raw memory graph and create a mapping from CFG node to 3373 // memory node. Exclude phis for now. 3374 stack.push(_phase->C->root(), 1); 3375 do { 3376 Node* n = stack.node(); 3377 int opc = n->Opcode(); 3378 uint i = stack.index(); 3379 if (i < n->req()) { 3380 Node* mem = NULL; 3381 if (opc == Op_Root) { 3382 Node* in = n->in(i); 3383 int in_opc = in->Opcode(); 3384 if (in_opc == Op_Return || in_opc == Op_Rethrow) { 3385 mem = in->in(TypeFunc::Memory); 3386 } else if (in_opc == Op_Halt) { 3387 if (!in->in(0)->is_Region()) { 3388 Node* proj = in->in(0); 3389 assert(proj->is_Proj(), ""); 3390 Node* in = proj->in(0); 3391 assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), ""); 3392 if (in->is_CallStaticJava()) { 3393 mem = in->in(TypeFunc::Memory); 3394 } else if (in->Opcode() == Op_Catch) { 3395 Node* call = in->in(0)->in(0); 3396 assert(call->is_Call(), ""); 3397 mem = call->in(TypeFunc::Memory); 3398 } 3399 } 3400 } else { 3401 #ifdef ASSERT 3402 n->dump(); 3403 in->dump(); 3404 #endif 3405 ShouldNotReachHere(); 3406 } 3407 } else { 3408 assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, ""); 3409 assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, ""); 3410 mem = n->in(i); 3411 } 3412 i++; 3413 stack.set_index(i); 3414 if (mem == NULL) { 3415 continue; 3416 } 3417 
for (;;) { 3418 if (visited.test_set(mem->_idx) || mem->is_Start()) { 3419 break; 3420 } 3421 if (mem->is_Phi()) { 3422 stack.push(mem, 2); 3423 mem = mem->in(1); 3424 } else if (mem->is_Proj()) { 3425 stack.push(mem, mem->req()); 3426 mem = mem->in(0); 3427 } else if (mem->is_SafePoint() || mem->is_MemBar()) { 3428 mem = mem->in(TypeFunc::Memory); 3429 } else if (mem->is_MergeMem()) { 3430 MergeMemNode* mm = mem->as_MergeMem(); 3431 mem = mm->memory_at(_alias); 3432 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) { 3433 assert(_alias == Compile::AliasIdxRaw, ""); 3434 stack.push(mem, mem->req()); 3435 mem = mem->in(MemNode::Memory); 3436 } else if (mem->Opcode() == Op_ShenandoahWriteBarrier) { 3437 assert(_alias != Compile::AliasIdxRaw, ""); 3438 mem = mem->in(ShenandoahBarrierNode::Memory); 3439 } else if (mem->Opcode() == Op_ShenandoahWBMemProj) { 3440 stack.push(mem, mem->req()); 3441 mem = mem->in(ShenandoahWBMemProjNode::WriteBarrier); 3442 } else { 3443 #ifdef ASSERT 3444 mem->dump(); 3445 #endif 3446 ShouldNotReachHere(); 3447 } 3448 } 3449 } else { 3450 if (n->is_Phi()) { 3451 // Nothing 3452 } else if (!n->is_Root()) { 3453 Node* c = get_ctrl(n); 3454 _memory_nodes.map(c->_idx, n); 3455 } 3456 stack.pop(); 3457 } 3458 } while(stack.is_nonempty()); 3459 3460 // Iterate over CFG nodes in rpo and propagate memory state to 3461 // compute memory state at regions, creating new phis if needed. 
3462 Node_List rpo_list; 3463 visited.Clear(); 3464 _phase->rpo(_phase->C->root(), stack, visited, rpo_list); 3465 Node* root = rpo_list.pop(); 3466 assert(root == _phase->C->root(), ""); 3467 3468 const bool trace = false; 3469 #ifdef ASSERT 3470 if (trace) { 3471 for (int i = rpo_list.size() - 1; i >= 0; i--) { 3472 Node* c = rpo_list.at(i); 3473 if (_memory_nodes[c->_idx] != NULL) { 3474 tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); 3475 } 3476 } 3477 } 3478 #endif 3479 uint last = _phase->C->unique(); 3480 3481 #ifdef ASSERT 3482 uint8_t max_depth = 0; 3483 for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) { 3484 IdealLoopTree* lpt = iter.current(); 3485 max_depth = MAX2(max_depth, lpt->_nest); 3486 } 3487 #endif 3488 3489 bool progress = true; 3490 int iteration = 0; 3491 Node_List dead_phis; 3492 while (progress) { 3493 progress = false; 3494 iteration++; 3495 assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop(), ""); 3496 if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); } 3497 IdealLoopTree* last_updated_ilt = NULL; 3498 for (int i = rpo_list.size() - 1; i >= 0; i--) { 3499 Node* c = rpo_list.at(i); 3500 3501 Node* prev_mem = _memory_nodes[c->_idx]; 3502 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) { 3503 Node* prev_region = regions[c->_idx]; 3504 Node* unique = NULL; 3505 for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) { 3506 Node* m = _memory_nodes[c->in(j)->_idx]; 3507 assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state"); 3508 if (m != NULL) { 3509 if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) { 3510 assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), ""); 3511 // continue 3512 } else 
if (unique == NULL) { 3513 unique = m; 3514 } else if (m == unique) { 3515 // continue 3516 } else { 3517 unique = NodeSentinel; 3518 } 3519 } 3520 } 3521 assert(unique != NULL, "empty phi???"); 3522 if (unique != NodeSentinel) { 3523 if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) { 3524 dead_phis.push(prev_region); 3525 } 3526 regions.map(c->_idx, unique); 3527 } else { 3528 Node* phi = NULL; 3529 if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) { 3530 phi = prev_region; 3531 for (uint k = 1; k < c->req(); k++) { 3532 Node* m = _memory_nodes[c->in(k)->_idx]; 3533 assert(m != NULL, "expect memory state"); 3534 phi->set_req(k, m); 3535 } 3536 } else { 3537 for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) { 3538 Node* u = c->fast_out(j); 3539 if (u->is_Phi() && u->bottom_type() == Type::MEMORY && 3540 (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) { 3541 phi = u; 3542 for (uint k = 1; k < c->req() && phi != NULL; k++) { 3543 Node* m = _memory_nodes[c->in(k)->_idx]; 3544 assert(m != NULL, "expect memory state"); 3545 if (u->in(k) != m) { 3546 phi = NULL; 3547 } 3548 } 3549 } 3550 } 3551 if (phi == NULL) { 3552 phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias)); 3553 for (uint k = 1; k < c->req(); k++) { 3554 Node* m = _memory_nodes[c->in(k)->_idx]; 3555 assert(m != NULL, "expect memory state"); 3556 phi->init_req(k, m); 3557 } 3558 } 3559 } 3560 assert(phi != NULL, ""); 3561 regions.map(c->_idx, phi); 3562 } 3563 Node* current_region = regions[c->_idx]; 3564 if (current_region != prev_region) { 3565 progress = true; 3566 if (prev_region == prev_mem) { 3567 _memory_nodes.map(c->_idx, current_region); 3568 } 3569 } 3570 } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) { 3571 Node* m = _memory_nodes[_phase->idom(c)->_idx]; 3572 assert(m != NULL, "expect memory 
state");
      // Memory state at c differs from what we had recorded: update and
      // iterate again so the change propagates to dominated blocks.
      if (m != prev_mem) {
        _memory_nodes.map(c->_idx, m);
        progress = true;
      }
    }
#ifdef ASSERT
    if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); }
#endif
  }
}

// Replace existing phi with computed memory state for that region
// if different (could be a new phi or a dominating memory node if
// that phi was found to be useless).
while (dead_phis.size() > 0) {
  Node* n = dead_phis.pop();
  n->replace_by(_phase->C->top());
  n->destruct();
}
// Register the new memory phis (those created above, i.e. _idx >= last)
// with the loop optimizer now that the iteration has stabilized.
for (int i = rpo_list.size() - 1; i >= 0; i--) {
  Node* c = rpo_list.at(i);
  if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
    Node* n = regions[c->_idx];
    if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
      _phase->register_new_node(n, c);
    }
  }
}
// Retire pre-existing memory phis that were superseded by the computed
// state: bottom (all-slice) phis get their uses rewired one by one,
// phis on this exact alias slice are replaced wholesale.
for (int i = rpo_list.size() - 1; i >= 0; i--) {
  Node* c = rpo_list.at(i);
  if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
    Node* n = regions[c->_idx];
    for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
      Node* u = c->fast_out(i);
      if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
          u != n) {
        if (u->adr_type() == TypePtr::BOTTOM) {
          fix_memory_uses(u, n, n, c);
        } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
          _phase->lazy_replace(u, n);
          // lazy_replace removed uses of this region; back the iterator up.
          --i; --imax;
        }
      }
    }
  }
}
}

// Control node to record a memory state at. Same as PhaseIdealLoop's
// get_ctrl() except for memory projections out of a call with an
// exception handler: those are attributed to the matching catch
// projection so the two exception paths keep distinct memory states.
Node* MemoryGraphFixer::get_ctrl(Node* n) const {
  Node* c = _phase->get_ctrl(n);
  if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
    assert(c == n->in(0), "");
    CallNode* call = c->as_Call();
    CallProjections projs;
    call->extract_projections(&projs, true, false);
    if (projs.catchall_memproj != NULL) {
      if (projs.fallthrough_memproj == n) {
        c = projs.fallthrough_catchproj;
      } else {
        assert(projs.catchall_memproj == n, "");
        c = projs.catchall_catchproj;
      }
    }
  }
  return c;
}

// A CFG node is its own control; for data nodes delegate to get_ctrl()
// above (with its catch-projection adjustment).
Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
  if (_phase->has_ctrl(n))
    return get_ctrl(n);
  else {
    assert (n->is_CFG(), "must be a CFG node");
    return n;
  }
}

// m is a usable memory state at control c only if it is pinned at c.
bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
  return m != NULL && get_ctrl(m) == c;
}

// Find the memory state (for this fixer's alias slice) that is current
// at control ctrl. Walks up the dominator tree from ctrl until a
// recorded state valid at that control is found. If n is given, skip
// over memory nodes at ctrl that n does not depend on, so the result
// is usable as an input for n.
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
  assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
  Node* mem = _memory_nodes[ctrl->_idx];
  Node* c = ctrl;
  // The CatchProj test keeps a state recorded at a call visible at its
  // catch projection (get_ctrl() above attributes call memory to the
  // catch projections).
  while (!mem_is_valid(mem, c) &&
         (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
    c = _phase->idom(c);
    mem = _memory_nodes[c->_idx];
  }
  if (n != NULL && mem_is_valid(mem, c)) {
    // Step backward through same-control memory nodes until one that
    // dominates n (i.e. can feed n) is found.
    while (!ShenandoahWriteBarrierNode::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
      mem = next_mem(mem, _alias);
    }
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(_alias);
    }
    if (!mem_is_valid(mem, c)) {
      // Walked out of this block's memory subgraph: resume the
      // dominator walk from here.
      do {
        c = _phase->idom(c);
        mem = _memory_nodes[c->_idx];
      } while (!mem_is_valid(mem, c) &&
               (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
    }
  }
  assert(mem->bottom_type() == Type::MEMORY, "");
  return mem;
}

// Does this region already merge memory for our alias slice with a phi?
bool MemoryGraphFixer::has_mem_phi(Node* region) const {
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* use = region->fast_out(i);
    if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
        (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
      return true;
    }
  }
  return false;
}

// Splice the new memory state new_mem (at new_ctrl) into the memory
// graph, rerouting uses of mem and creating/patching memory phis as
// needed so every use sees the state that dominates it.
void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
  assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
  const bool
trace = false;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
  DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
  GrowableArray<Node*> phis;
  if (mem_for_ctrl != mem) {
    // There are memory nodes between mem and the state at ctrl: walk
    // the chain from mem_for_ctrl back to mem and splice new_mem in as
    // the input of the last node on that chain (prev).
    Node* old = mem_for_ctrl;
    Node* prev = NULL;
    while (old != mem) {
      prev = old;
      if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(MemNode::Memory);
      } else if (old->Opcode() == Op_SCMemProj) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(0);
      } else if (old->Opcode() == Op_ShenandoahWBMemProj) {
        assert(_alias != Compile::AliasIdxRaw, "");
        old = old->in(ShenandoahWBMemProjNode::WriteBarrier);
      } else if (old->Opcode() == Op_ShenandoahWriteBarrier) {
        assert(_alias != Compile::AliasIdxRaw, "");
        old = old->in(ShenandoahBarrierNode::Memory);
      } else {
        ShouldNotReachHere();
      }
    }
    assert(prev != NULL, "");
    if (new_ctrl != ctrl) {
      _memory_nodes.map(ctrl->_idx, mem);
      _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
    }
    // The memory input slot differs between a write barrier node and a
    // regular memory node.
    uint input = prev->Opcode() == Op_ShenandoahWriteBarrier ? (uint)ShenandoahBarrierNode::Memory : (uint)MemNode::Memory;
    _phase->igvn().replace_input_of(prev, input, new_mem);
  } else {
    // new_mem becomes the state at new_ctrl. Walk the CFG downward from
    // new_ctrl (worklist in uses) and, at each region the new state can
    // reach, merge it with the incoming state by creating a memory phi
    // where one is required.
    uses.clear();
    _memory_nodes.map(new_ctrl->_idx, new_mem);
    uses.push(new_ctrl);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(n->is_CFG(), "");
      DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        if (!u->is_Root() && u->is_CFG() && u != n) {
          Node* m = _memory_nodes[u->_idx];
          if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
              !has_mem_phi(u) &&
              u->unique_ctrl_out()->Opcode() != Op_Halt) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
            DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });

            if (!mem_is_valid(m, u) || !m->is_Phi()) {
              bool push = true;
              bool create_phi = true;
              if (_phase->is_dominator(new_ctrl, u)) {
                // new_ctrl dominates u: no merge needed, the new state
                // simply flows through.
                create_phi = false;
              } else if (!_phase->C->has_irreducible_loop()) {
                // Decide whether u is a merge point where old and new
                // state can actually meet (loop head of an enclosing
                // loop, or a region the new state reaches on one path).
                IdealLoopTree* loop = _phase->get_loop(ctrl);
                bool do_check = true;
                IdealLoopTree* l = loop;
                create_phi = false;
                while (l != _phase->ltree_root()) {
                  if (_phase->is_dominator(l->_head, u) && _phase->is_dominator(_phase->idom(u), l->_head)) {
                    create_phi = true;
                    do_check = false;
                    break;
                  }
                  l = l->_parent;
                }

                if (do_check) {
                  assert(!create_phi, "");
                  IdealLoopTree* u_loop = _phase->get_loop(u);
                  if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
                    Node* c = ctrl;
                    while (!_phase->is_dominator(c, u_loop->tail())) {
                      c = _phase->idom(c);
                    }
                    if (!_phase->is_dominator(c, u)) {
                      do_check = false;
                    }
                  }
                }

                if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
                  create_phi = true;
                }
              }
              if (create_phi) {
                // Merge states with a new phi; its inputs are filled in
                // after the walk (see the phis loop below).
                Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
                _phase->register_new_node(phi, u);
                phis.push(phi);
                DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
                if (!mem_is_valid(m, u)) {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
                  _memory_nodes.map(u->_idx, phi);
                } else {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
                  // A state already lives at u: walk to the first node
                  // of u's memory chain and make the new phi its input.
                  for (;;) {
                    assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj() || m->Opcode() == Op_ShenandoahWriteBarrier || m->Opcode() == Op_ShenandoahWBMemProj, "");
                    Node* next = NULL;
                    if (m->is_Proj()) {
                      next = m->in(0);
                    } else if (m->Opcode() == Op_ShenandoahWBMemProj) {
                      next = m->in(ShenandoahWBMemProjNode::WriteBarrier);
                    } else if (m->is_Mem() || m->is_LoadStore()) {
                      assert(_alias == Compile::AliasIdxRaw, "");
                      next = m->in(MemNode::Memory);
                    } else {
                      assert(_alias != Compile::AliasIdxRaw, "");
                      assert (m->Opcode() == Op_ShenandoahWriteBarrier, "");
                      next = m->in(ShenandoahBarrierNode::Memory);
                    }
                    if (_phase->get_ctrl(next) != u) {
                      break;
                    }
                    if (next->is_MergeMem()) {
                      assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
                      break;
                    }
                    if (next->is_Phi()) {
                      assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
                      break;
                    }
                    m = next;
                  }

                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
                  assert(m->is_Mem() || m->is_LoadStore() || m->Opcode() == Op_ShenandoahWriteBarrier, "");
                  uint input = (m->is_Mem() || m->is_LoadStore()) ? (uint)MemNode::Memory : (uint)ShenandoahBarrierNode::Memory;
                  _phase->igvn().replace_input_of(m, input, phi);
                  // Existing state was rerouted in place; no need to
                  // keep walking past u.
                  push = false;
                }
              } else {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
              }
              if (push) {
                uses.push(u);
              }
            }
          } else if (!mem_is_valid(m, u) &&
                     // NeverBranch's taken projection leads to dead code;
                     // don't walk it.
                     !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
            uses.push(u);
          }
        }
      }
    }
    // Now that all merge points are known, fill in the inputs of the
    // phis created above from the state at each predecessor.
    for (int i = 0; i < phis.length(); i++) {
      Node* n = phis.at(i);
      Node* r = n->in(0);
      DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
      for (uint j = 1; j < n->req(); j++) {
        Node* m = find_mem(r->in(j), NULL);
        _phase->igvn().replace_input_of(n, j, m);
        DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
      }
    }
  }
  // Second pass: reroute every pre-existing use of mem (only nodes with
  // _idx < last, i.e. not created above) to the state that actually
  // dominates it. Each --i backs the DUIterator up after an edge to mem
  // was removed by replace_input_of.
  uint last = _phase->C->unique();
  MergeMemNode* mm = NULL;
  int alias = _alias;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last) {
      if (u->is_Mem()) {
        if (_phase->C->get_alias_index(u->adr_type()) == alias) {
          Node* m = find_mem(_phase->get_ctrl(u), u);
          if (m != mem) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
            _phase->igvn().replace_input_of(u, MemNode::Memory, m);
            --i;
          }
        }
      } else if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(alias) == mem) {
          MergeMemNode* newmm = NULL;
          // The MergeMem's slice for this alias must change, but only
          // for uses the new state dominates: clone it per use/phi input
          // as needed.
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              assert(uu->adr_type() == TypePtr::BOTTOM, "");
              Node* region = uu->in(0);
              int nb = 0;
              for (uint k = 1; k < uu->req(); k++) {
                if (uu->in(k) == u) {
                  Node* m = find_mem(region->in(k), NULL);
                  if (m != mem) {
                    DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
                    newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
              }
              if (nb > 0) {
                --j;
              }
            } else {
              Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
              if (m != mem) {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
                newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
          Node* region = u->in(0);
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem) {
              Node* m = find_mem(region->in(j), NULL);
              Node* nnew = m;
              if (m != mem) {
                // A bottom (all-slice) phi needs a MergeMem wrapping the
                // per-slice state.
                if (u->adr_type() == TypePtr::BOTTOM) {
                  mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
                  nnew = mm;
                }
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
                _phase->igvn().replace_input_of(u, j, nnew);
                replaced = true;
              }
            }
          }
          if (replaced) {
            --i;
          }
        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == NULL) {
        // Whole-memory users (returns, safepoints, calls, …) need a
        // MergeMem carrying the updated slice.
        assert(u->adr_type() != NULL ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "");
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
          _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
          --i;
        }
      } else if (u->adr_type() != TypePtr::BOTTOM &&
                 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        assert(m != mem, "");
        // u is on the wrong slice...
        assert(u->is_ClearArray(), "");
        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
        _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
        --i;
      }
    }
  }
#ifdef ASSERT
  assert(new_mem->outcnt() > 0, "");
  for (int i = 0; i < phis.length(); i++) {
    Node* n = phis.at(i);
    assert(n->outcnt() > 0, "new phi must have uses now");
  }
#endif
}

// Build a MergeMem over mem with rep_proj substituted on this fixer's
// alias slice, pinned at rep_ctrl.
MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
  MergeMemNode* mm = MergeMemNode::make(mem);
  mm->set_memory_at(_alias, rep_proj);
  _phase->register_new_node(mm, rep_ctrl);
  return mm;
}

// Return a MergeMem equal to u but with rep_proj on this fixer's alias
// slice, pinned at rep_ctrl (or the deeper of u's control and rep_ctrl).
// If u has a single use it is updated in place; otherwise a fresh
// MergeMem is built. i is the caller's DUIterator over mem's outputs and
// is backed up when an edge from u to mem is removed.
MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
  MergeMemNode* newmm = NULL;
  MergeMemNode* u_mm = u->as_MergeMem();
  Node* c = _phase->get_ctrl(u);
  if (_phase->is_dominator(c, rep_ctrl)) {
    c = rep_ctrl;
  } else {
    assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
  }
  if (u->outcnt() == 1) {
    // Single use: mutate u directly rather than cloning.
    if (u->req() > (uint)_alias
&& u->in(_alias) == mem) {
      _phase->igvn().replace_input_of(u, _alias, rep_proj);
      // An edge from u to mem went away; back the caller's iterator up.
      --i;
    } else {
      _phase->igvn().rehash_node_delayed(u);
      u_mm->set_memory_at(_alias, rep_proj);
    }
    newmm = u_mm;
    _phase->set_ctrl_and_loop(u, c);
  } else {
    // can't simply clone u and then change one of its input because
    // it adds and then removes an edge which messes with the
    // DUIterator
    newmm = MergeMemNode::make(u_mm->base_memory());
    for (uint j = 0; j < u->req(); j++) {
      if (j < newmm->req()) {
        if (j == (uint)_alias) {
          newmm->set_req(j, rep_proj);
        } else if (newmm->in(j) != u->in(j)) {
          newmm->set_req(j, u->in(j));
        }
      } else if (j == (uint)_alias) {
        newmm->add_req(rep_proj);
      } else {
        newmm->add_req(u->in(j));
      }
    }
    // u may not even have an explicit slot for _alias; force the slice.
    if ((uint)_alias >= u->req()) {
      newmm->set_memory_at(_alias, rep_proj);
    }
    _phase->register_new_node(newmm, c);
  }
  return newmm;
}

// Should this memory phi be rewired for our alias slice? A bottom
// (all-slice) phi qualifies only if the region has no dedicated phi for
// this exact slice (that one would be handled instead); a narrow phi
// qualifies iff it is on this slice.
bool MemoryGraphFixer::should_process_phi(Node* phi) const {
  if (phi->adr_type() == TypePtr::BOTTOM) {
    Node* region = phi->in(0);
    for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
      Node* uu = region->fast_out(j);
      if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
        return false;
      }
    }
    return true;
  }
  return _phase->C->get_alias_index(phi->adr_type()) == _alias;
}

// Reroute pre-existing uses of mem to rep_proj (the memory state
// produced by replacement, available at rep_ctrl), but only those uses
// that rep_ctrl dominates. Nodes created during this pass (_idx >= last)
// are left alone. Each --i backs the DUIterator up after
// replace_input_of removed an edge to mem.
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
  uint last = _phase->C->unique();
  MergeMemNode* mm = NULL;
  assert(mem->bottom_type() == Type::MEMORY, "");
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u != replacement && u->_idx < last) {
      if (u->is_ShenandoahBarrier() && _alias != Compile::AliasIdxRaw) {
        if (_phase->C->get_alias_index(u->adr_type()) == _alias && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          assert(u->find_edge(mem) == -1, "only one edge");
          --i;
        }
      } else if (u->is_Mem()) {
        if (_phase->C->get_alias_index(u->adr_type()) == _alias && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          assert(_alias == Compile::AliasIdxRaw , "only raw memory can lead to a memory operation");
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          assert(u->find_edge(mem) == -1, "only one edge");
          --i;
        }
      } else if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(_alias) == mem) {
          MergeMemNode* newmm = NULL;
          // Clone the MergeMem (at most once) for those of its uses
          // that rep_ctrl dominates; other uses keep the original.
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              if (should_process_phi(uu)) {
                Node* region = uu->in(0);
                int nb = 0;
                for (uint k = 1; k < uu->req(); k++) {
                  // Phi inputs are checked against the matching region
                  // predecessor, not the phi's own block.
                  if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
                    if (newmm == NULL) {
                      newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                    }
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
                if (nb > 0) {
                  --j;
                }
              }
            } else {
              if (rep_ctrl != uu && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
                if (newmm == NULL) {
                  newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                }
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        Node* region = u->in(0);
        if (should_process_phi(u)) {
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
              Node* nnew = rep_proj;
              // A bottom (all-slice) phi takes a MergeMem, built lazily
              // and shared across inputs.
              if (u->adr_type() == TypePtr::BOTTOM) {
                if (mm == NULL) {
                  mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
                }
                nnew = mm;
              }
              _phase->igvn().replace_input_of(u, j, nnew);
              replaced = true;
            }
          }
          if (replaced) {
            --i;
          }

        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == NULL) {
        // Whole-memory users (returns, safepoints, calls, …) get the
        // lazily-built MergeMem.
        assert(u->adr_type() != NULL ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "");
        if (ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          if (mm == NULL) {
            mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
          }
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        if (ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          --i;
        }
      }
    }
  }
}

// A write barrier's memory projection n is going away: if it is the
// recorded memory state at its control, fall back to the barrier's own
// memory input as the state there.
void MemoryGraphFixer::remove(Node* n) {
  assert(n->Opcode() == Op_ShenandoahWBMemProj, "");
  Node* c = _phase->get_ctrl(n);
  Node* mem = find_mem(c, NULL);
  if (mem == n) {
    _memory_nodes.map(c->_idx, mem->in(ShenandoahWBMemProjNode::WriteBarrier)->in(ShenandoahBarrierNode::Memory));
  }
}