src/share/vm/opto/lcm.cpp (sdiff for 6895383)

Old version of the hunk:
 599         Node *m = n->in(j);
 600         if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
 601           local++;              // One more block-local input
 602       }
 603       ready_cnt[n->_idx] = local; // Count em up
 604 
 605       // A few node types require changing a required edge to a precedence edge
 606       // before allocation.
 607       if( UseConcMarkSweepGC || UseG1GC ) {
 608         if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
 609           // Note: Required edges with an index greater than oper_input_base
 610           // are not supported by the allocator.
 611           // Note2: Can only depend on unmatched edge being last,
 612           // can not depend on its absolute position.
 613           Node *oop_store = n->in(n->req() - 1);
 614           n->del_req(n->req() - 1);
 615           n->add_prec(oop_store);
 616           assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
 617         }
 618       }
 619       if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
 620           n->req() > TypeFunc::Parms ) {
 621         // MemBarAcquire could be created without Precedent edge.
 622         // del_req() replaces the specified edge with the last input edge
 623         // and then removes the last edge. If the specified edge > number of
 624         // edges the last edge will be moved outside of the input edges array
 625         // and the edge will be lost. This is why this code should be
 626         // executed only when Precedent (== TypeFunc::Parms) edge is present.
 627         Node *x = n->in(TypeFunc::Parms);
 628         n->del_req(TypeFunc::Parms);
 629         n->add_prec(x);
 630       }
 631     }
 632   }
 633   for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
 634     ready_cnt[_nodes[i2]->_idx] = 0;
 635 
 636   // All the prescheduled guys do not hold back internal nodes
 637   uint i3;
 638   for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
 639     Node *n = _nodes[i3];       // Get pre-scheduled
 640     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {

New version of the hunk:

 599         Node *m = n->in(j);
 600         if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
 601           local++;              // One more block-local input
 602       }
 603       ready_cnt[n->_idx] = local; // Count em up
 604 
 605       // A few node types require changing a required edge to a precedence edge
 606       // before allocation.
 607       if( UseConcMarkSweepGC || UseG1GC ) {
 608         if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
 609           // Note: Required edges with an index greater than oper_input_base
 610           // are not supported by the allocator.
 611           // Note2: Can only depend on unmatched edge being last,
 612           // can not depend on its absolute position.
 613           Node *oop_store = n->in(n->req() - 1);
 614           n->del_req(n->req() - 1);
 615           n->add_prec(oop_store);
 616           assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
 617         }
 618       }
 619       if( n->is_Mach() && n->req() > TypeFunc::Parms &&
 620           (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
 621            n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
 622         // MemBarAcquire could be created without Precedent edge.
 623         // del_req() replaces the specified edge with the last input edge
 624         // and then removes the last edge. If the specified edge > number of
 625         // edges the last edge will be moved outside of the input edges array
 626         // and the edge will be lost. This is why this code should be
 627         // executed only when Precedent (== TypeFunc::Parms) edge is present.
 628         Node *x = n->in(TypeFunc::Parms);
 629         n->del_req(TypeFunc::Parms);
 630         n->add_prec(x);
 631       }
 632     }
 633   }
 634   for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
 635     ready_cnt[_nodes[i2]->_idx] = 0;
 636 
 637   // All the prescheduled guys do not hold back internal nodes
 638   uint i3;
 639   for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
 640     Node *n = _nodes[i3];       // Get pre-scheduled
 641     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
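
The new version of the hunk extends the Precedent-edge conversion so that it applies to Op_MemBarVolatile as well as Op_MemBarAcquire, and it tests n->req() > TypeFunc::Parms before looking at the opcode. The guard matters because of the swap-with-last behavior the comment describes: del_req() copies the last input over the deleted slot and then drops the last slot, so calling it when the Precedent input (index TypeFunc::Parms) is absent would silently discard a different edge. Below is a minimal, self-contained sketch of that mechanism, not HotSpot code; ToyNode, its del_req()/add_prec(), and the Parms constant are illustrative stand-ins for Node::del_req(), Node::add_prec(), and TypeFunc::Parms.

#include <cassert>
#include <cstdio>
#include <vector>

// Toy model of a node with required input edges and precedence edges.
struct ToyNode {
  std::vector<int> in;     // required input edges (edge ids)
  std::vector<int> prec;   // precedence edges

  size_t req() const { return in.size(); }

  // Swap-with-last deletion, as described in the lcm.cpp comment:
  // the last input is copied over slot i, then the last slot is dropped.
  // If i were past the live inputs, the last input would be copied into
  // a dead slot and then discarded -- the hazard the req() guard avoids.
  void del_req(size_t i) {
    in[i] = in.back();
    in.pop_back();
  }

  void add_prec(int e) { prec.push_back(e); }
};

int main() {
  const size_t Parms = 4;            // stand-in for TypeFunc::Parms
  ToyNode membar;
  membar.in = {10, 11, 12, 13, 42};  // Precedent edge present at index Parms

  // Same shape as the lcm.cpp code: only convert when the edge exists.
  if (membar.req() > Parms) {
    int x = membar.in[Parms];
    membar.del_req(Parms);
    membar.add_prec(x);
  }

  assert(membar.req() == Parms);
  assert(membar.prec.size() == 1 && membar.prec[0] == 42);
  std::printf("Precedent edge %d is now a precedence edge\n", membar.prec[0]);
  return 0;
}

The StoreCM handling earlier in the hunk uses the same mechanism: the trailing oop_store input is moved to a precedence edge so the register allocator never sees it as a required edge, while the scheduling constraint (oop_store must dominate the card mark) is preserved.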

