
src/hotspot/share/opto/loopnode.cpp

--- old/src/hotspot/share/opto/loopnode.cpp

3951     compute_lca_of_uses(n, early, true);
3952   }
3953 #endif
3954 
3955   // if this is a load, check for anti-dependent stores
3956   // We use a conservative algorithm to identify potential interfering
3957   // instructions and for rescheduling the load.  The users of the memory
3958   // input of this load are examined.  Any use which is not a load and is
3959   // dominated by early is considered a potentially interfering store.
3960   // This can produce false positives.
3961   if (n->is_Load() && LCA != early) {
3962     Node_List worklist;
3963 
3964     Node *mem = n->in(MemNode::Memory);
3965     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
3966       Node* s = mem->fast_out(i);
3967       worklist.push(s);
3968     }
3969     while(worklist.size() != 0 && LCA != early) {
3970       Node* s = worklist.pop();
3971       if (s->is_Load() || s->Opcode() == Op_SafePoint ||
3972           (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
3973         continue;
3974       } else if (s->is_MergeMem()) {
3975         for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
3976           Node* s1 = s->fast_out(i);
3977           worklist.push(s1);
3978         }
3979       } else {
3980         Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
3981         assert(sctrl != NULL || s->outcnt() == 0, "must have control");
3982         if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) {
3983           LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
3984         }
3985       }
3986     }
3987   }
3988 
3989   assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
3990   return LCA;
3991 }
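
Reviewer's note, not part of the patch: the loop above is a conservative anti-dependence scan over the users of the load's memory input, and the standalone sketch below restates its shape with toy types. Every identifier in the sketch is invented for illustration; none of it is the real Node / PhaseIdealLoop API.

// Toy sketch of the conservative anti-dependence walk: every user of the
// load's memory input that is not itself a load (or another known-harmless
// node) and whose control is dominated by 'early' is treated as a potentially
// interfering store, and the load's latest placement is pulled up to a block
// that also dominates that store's control.
#include <cstddef>
#include <vector>

struct Block {
  Block* idom;                    // immediate dominator, NULL for the root
};

static bool dominates(const Block* a, const Block* b) {
  for (const Block* x = b; x != NULL; x = x->idom) {
    if (x == a) return true;      // walk b's dominator chain looking for a
  }
  return false;
}

static Block* dom_lca(Block* a, Block* b) {
  for (Block* x = a; x != NULL; x = x->idom) {
    if (dominates(x, b)) return x; // deepest ancestor of a that also dominates b
  }
  return NULL;
}

enum Kind { kLoad, kSafePoint, kMergeMem, kOtherMemUse };

struct MemUse {
  Kind kind;
  Block* ctrl;                    // toy stand-in for get_ctrl()/in(0)
  std::vector<MemUse*> outs;      // users of a MergeMem, empty otherwise
};

// Conservatively raise 'lca' for anti-dependences, mirroring the worklist loop
// above (false positives are acceptable, missed stores are not).
static Block* raise_lca_for_antidep(const std::vector<MemUse*>& mem_users,
                                    Block* early, Block* lca) {
  std::vector<MemUse*> worklist(mem_users);
  while (!worklist.empty() && lca != early) {
    MemUse* s = worklist.back();
    worklist.pop_back();
    if (s->kind == kLoad || s->kind == kSafePoint) {
      continue;                                          // cannot clobber the location
    } else if (s->kind == kMergeMem) {
      for (MemUse* s1 : s->outs) worklist.push_back(s1); // look through merges
    } else if (s->ctrl != NULL && dominates(early, s->ctrl)) {
      lca = dom_lca(lca, s->ctrl);                       // pull the placement up
    }
  }
  return lca;
}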


4168       Node *m = wq.at(i);
4169       for (uint i = 1; i < m->req(); i++) {
4170         Node* nn = m->in(i);
4171         if (nn == n) {
4172           return;
4173         }
4174         if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
4175           wq.push(nn);
4176         }
4177       }
4178     }
4179     ShouldNotReachHere();
4180   }
4181 #endif
4182 }
4183 
4184 
4185 //------------------------------build_loop_late_post---------------------------
4186 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
4187 // Second pass finds latest legal placement, and ideal loop placement.
4188 void PhaseIdealLoop::build_loop_late_post( Node *n ) {
4189 
4190   if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) {
4191     _igvn._worklist.push(n);  // Maybe we'll normalize it, if no more loops.
4192   }
4193 
4194 #ifdef ASSERT
4195   if (_verify_only && !n->is_CFG()) {
4196     // Check def-use domination.
4197     compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
4198   }
4199 #endif
4200 
4201   // CFG and pinned nodes already handled
4202   if( n->in(0) ) {
4203     if( n->in(0)->is_top() ) return; // Dead?
4204 
4205     // We'd like +VerifyLoopOptimizations to not believe that Mod's/Loads
4206     // _must_ be pinned (they have to observe their control edge of course).
4207     // Unlike Stores (which modify an unallocable resource, the memory
4208     // state), Mods/Loads can float around.  So free them up.
4209     bool pinned = true;
4210     switch( n->Opcode() ) {
4211     case Op_DivI:
4212     case Op_DivF:
4213     case Op_DivD:
4214     case Op_ModI:
4215     case Op_ModF:
4216     case Op_ModD:
4217     case Op_LoadB:              // Same with Loads; they can sink
4218     case Op_LoadUB:             // during loop optimizations.
4219     case Op_LoadUS:
4220     case Op_LoadD:
4221     case Op_LoadF:
4222     case Op_LoadI:
4223     case Op_LoadKlass:
4224     case Op_LoadNKlass:
4225     case Op_LoadL:
4226     case Op_LoadS:
4227     case Op_LoadP:
4228     case Op_LoadBarrierSlowReg:
4229     case Op_LoadBarrierWeakSlowReg:


4486     }
4487     // Dump nodes it controls
4488     for( uint k = 0; k < _nodes.Size(); k++ ) {
4489       // (k < C->unique() && get_ctrl(find(k)) == n)
4490       if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) {
4491         Node *m = C->root()->find(k);
4492         if( m && m->outcnt() > 0 ) {
4493           if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
4494             tty->print_cr("*** BROKEN CTRL ACCESSOR!  _nodes[k] is %p, ctrl is %p",
4495                           _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL);
4496           }
4497           for( uint j = 0; j < loop->_nest; j++ )
4498             tty->print("  ");
4499           tty->print(" ");
4500           m->dump();
4501         }
4502       }
4503     }
4504   }
4505 }

4506 
4507 // Collect a R-P-O for the whole CFG.
4508 // Result list is in post-order (scan backwards for RPO)
4509 void PhaseIdealLoop::rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const {
4510   stk.push(start, 0);
4511   visited.set(start->_idx);
4512 
4513   while (stk.is_nonempty()) {
4514     Node* m   = stk.node();
4515     uint  idx = stk.index();
4516     if (idx < m->outcnt()) {
4517       stk.set_index(idx + 1);
4518       Node* n = m->raw_out(idx);
4519       if (n->is_CFG() && !visited.test_set(n->_idx)) {
4520         stk.push(n, 0);
4521       }
4522     } else {
4523       rpo_list.push(m);
4524       stk.pop();
4525     }
4526   }
4527 }
4528 #endif
4529 
4530 
4531 //=============================================================================
4532 //------------------------------LoopTreeIterator-----------------------------------
4533 
4534 // Advance to next loop tree using a preorder, left-to-right traversal.
4535 void LoopTreeIterator::next() {
4536   assert(!done(), "must not be done.");
4537   if (_curnt->_child != NULL) {
4538     _curnt = _curnt->_child;
4539   } else if (_curnt->_next != NULL) {
4540     _curnt = _curnt->_next;
4541   } else {
4542     while (_curnt != _root && _curnt->_next == NULL) {
4543       _curnt = _curnt->_parent;
4544     }
4545     if (_curnt == _root) {
4546       _curnt = NULL;
4547       assert(done(), "must be done.");
4548     } else {

+++ new/src/hotspot/share/opto/loopnode.cpp

3951     compute_lca_of_uses(n, early, true);
3952   }
3953 #endif
3954 
3955   // if this is a load, check for anti-dependent stores
3956   // We use a conservative algorithm to identify potential interfering
3957   // instructions and for rescheduling the load.  The users of the memory
3958   // input of this load are examined.  Any use which is not a load and is
3959   // dominated by early is considered a potentially interfering store.
3960   // This can produce false positives.
3961   if (n->is_Load() && LCA != early) {
3962     Node_List worklist;
3963 
3964     Node *mem = n->in(MemNode::Memory);
3965     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
3966       Node* s = mem->fast_out(i);
3967       worklist.push(s);
3968     }
3969     while(worklist.size() != 0 && LCA != early) {
3970       Node* s = worklist.pop();
3971       if (s->is_Load() || s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint ||
3972           (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
3973         continue;
3974       } else if (s->is_MergeMem()) {
3975         for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
3976           Node* s1 = s->fast_out(i);
3977           worklist.push(s1);
3978         }
3979       } else {
3980         Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
3981         assert(sctrl != NULL || s->outcnt() == 0, "must have control");
3982         if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) {
3983           LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
3984         }
3985       }
3986     }
3987   }
3988 
3989   assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
3990   return LCA;
3991 }


4168       Node *m = wq.at(i);
4169       for (uint i = 1; i < m->req(); i++) {
4170         Node* nn = m->in(i);
4171         if (nn == n) {
4172           return;
4173         }
4174         if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
4175           wq.push(nn);
4176         }
4177       }
4178     }
4179     ShouldNotReachHere();
4180   }
4181 #endif
4182 }
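
Reviewer's note: the assert-only loop ending here is a worklist search over def (input) edges that succeeds as soon as a target node is reached and otherwise falls through to ShouldNotReachHere(). A stripped-down sketch of that pattern, with toy types standing in for the real ones (and an explicit visited flag added so the toy never revisits a node):

// Toy sketch of the worklist search pattern above: starting from some seed
// nodes, walk input (def) edges, succeed as soon as the target is found, and
// report failure if the whole region is exhausted.  Types are illustrative only.
#include <cstddef>
#include <vector>

struct ToyNode {
  std::vector<ToyNode*> inputs;   // def edges (in(1)..in(req-1) in the real code)
  bool in_region;                 // stand-in for "ctrl is in the same loop"
  bool visited;                   // keeps this toy search from revisiting nodes
};

static bool reaches_target(std::vector<ToyNode*> wq, const ToyNode* target) {
  for (size_t i = 0; i < wq.size(); i++) {       // wq grows while we scan it
    ToyNode* m = wq[i];
    for (ToyNode* nn : m->inputs) {
      if (nn == target) return true;             // found: verification succeeds
      if (nn != NULL && nn->in_region && !nn->visited) {
        nn->visited = true;
        wq.push_back(nn);
      }
    }
  }
  return false;                                  // caller would ShouldNotReachHere()
}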
4183 
4184 
4185 //------------------------------build_loop_late_post---------------------------
4186 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
4187 // Second pass finds latest legal placement, and ideal loop placement.
4188 void PhaseIdealLoop::build_loop_late_post(Node *n) {
4189   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4190 
4191   if (bs->build_loop_late_post(this, n)) {
4192     return;
4193   }
4194 
4195   build_loop_late_post_work(n, true);
4196 }
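
Reviewer's note: the new entry point gives the GC's barrier set the first chance to place a node and only falls back to the generic build_loop_late_post_work(). The toy sketch below shows that hook shape; the Toy* names are hypothetical and do not reflect the real BarrierSetC2 interface.

// Toy sketch of the delegation introduced above: the GC-specific barrier set
// may claim late placement of a node; only if it declines does the generic
// late-placement code run.
struct ToyNode;
struct ToyPhase;

struct ToyBarrierSetC2 {
  // Return true if late placement of 'n' was handled in a GC-specific way.
  virtual bool build_loop_late_post(ToyPhase* phase, ToyNode* n) { return false; }
  virtual ~ToyBarrierSetC2() {}
};

struct ToyPhase {
  ToyBarrierSetC2* bs;
  void build_loop_late_post_work(ToyNode* n, bool pinned) { /* generic placement */ }
  void build_loop_late_post(ToyNode* n) {
    if (bs->build_loop_late_post(this, n)) {
      return;                    // e.g. a GC barrier node with special rules
    }
    build_loop_late_post_work(n, /*pinned*/ true);
  }
};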
4197 
4198 void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
4199 
4200   if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) {
4201     _igvn._worklist.push(n);  // Maybe we'll normalize it, if no more loops.
4202   }
4203 
4204 #ifdef ASSERT
4205   if (_verify_only && !n->is_CFG()) {
4206     // Check def-use domination.
4207     compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
4208   }
4209 #endif
4210 
4211   // CFG and pinned nodes already handled
4212   if( n->in(0) ) {
4213     if( n->in(0)->is_top() ) return; // Dead?
4214 
4215     // We'd like +VerifyLoopOptimizations to not believe that Mod's/Loads
4216     // _must_ be pinned (they have to observe their control edge of course).
4217     // Unlike Stores (which modify an unallocable resource, the memory
4218     // state), Mods/Loads can float around.  So free them up.

4219     switch( n->Opcode() ) {
4220     case Op_DivI:
4221     case Op_DivF:
4222     case Op_DivD:
4223     case Op_ModI:
4224     case Op_ModF:
4225     case Op_ModD:
4226     case Op_LoadB:              // Same with Loads; they can sink
4227     case Op_LoadUB:             // during loop optimizations.
4228     case Op_LoadUS:
4229     case Op_LoadD:
4230     case Op_LoadF:
4231     case Op_LoadI:
4232     case Op_LoadKlass:
4233     case Op_LoadNKlass:
4234     case Op_LoadL:
4235     case Op_LoadS:
4236     case Op_LoadP:
4237     case Op_LoadBarrierSlowReg:
4238     case Op_LoadBarrierWeakSlowReg:


4495     }
4496     // Dump nodes it controls
4497     for( uint k = 0; k < _nodes.Size(); k++ ) {
4498       // (k < C->unique() && get_ctrl(find(k)) == n)
4499       if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) {
4500         Node *m = C->root()->find(k);
4501         if( m && m->outcnt() > 0 ) {
4502           if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
4503             tty->print_cr("*** BROKEN CTRL ACCESSOR!  _nodes[k] is %p, ctrl is %p",
4504                           _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL);
4505           }
4506           for( uint j = 0; j < loop->_nest; j++ )
4507             tty->print("  ");
4508           tty->print(" ");
4509           m->dump();
4510         }
4511       }
4512     }
4513   }
4514 }
4515 #endif
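
Reviewer's note: the comparison against (Node*)((intptr_t)n + 1) above relies on a low-bit tag in _nodes[], where a data node's slot stores its control node with bit 0 set (what has_ctrl()/get_ctrl_no_update() decode). A tiny sketch of that encoding, with hypothetical helper names:

// Toy sketch of the low-bit tagging that the '(Node*)((intptr_t)n + 1)' check
// above depends on: a data node's _nodes[] slot holds its control node with
// bit 0 set, so it can be told apart from other (untagged, aligned) entries.
#include <cstdint>

struct Node;  // opaque; assumed to be at least 2-byte aligned, so bit 0 is free

static inline void* tag_ctrl(Node* ctrl)   { return (void*)((intptr_t)ctrl + 1); }
static inline bool  entry_is_ctrl(void* e) { return ((intptr_t)e & 1) != 0; }
static inline Node* untag_ctrl(void* e)    { return (Node*)((intptr_t)e - 1); }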
4516 
4517 // Collect a R-P-O for the whole CFG.
4518 // Result list is in post-order (scan backwards for RPO)
4519 void PhaseIdealLoop::rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const {
4520   stk.push(start, 0);
4521   visited.set(start->_idx);
4522 
4523   while (stk.is_nonempty()) {
4524     Node* m   = stk.node();
4525     uint  idx = stk.index();
4526     if (idx < m->outcnt()) {
4527       stk.set_index(idx + 1);
4528       Node* n = m->raw_out(idx);
4529       if (n->is_CFG() && !visited.test_set(n->_idx)) {
4530         stk.push(n, 0);
4531       }
4532     } else {
4533       rpo_list.push(m);
4534       stk.pop();
4535     }
4536   }
4537 }
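
Reviewer's note: rpo() is an iterative DFS that keeps a (node, next-out-index) stack and emits a node only after all of its CFG successors have been visited, so the result list is a post-order and scanning it backwards gives the reverse post-order. A standalone sketch with toy types:

// Standalone sketch (toy graph types, not Node/Node_Stack) of the iterative
// post-order walk above.  A node is pushed onto the result list only once all
// of its successors have been visited; reading the list back to front is RPO.
#include <cstddef>
#include <utility>
#include <vector>

struct ToyBlock {
  std::vector<ToyBlock*> succs;   // CFG successors (raw_out() in the real code)
  bool visited;
};

static void postorder(ToyBlock* start, std::vector<ToyBlock*>& post) {
  std::vector<std::pair<ToyBlock*, size_t> > stk;
  stk.push_back(std::make_pair(start, (size_t)0));
  start->visited = true;
  while (!stk.empty()) {
    ToyBlock* m = stk.back().first;
    size_t    i = stk.back().second;
    if (i < m->succs.size()) {
      stk.back().second = i + 1;              // advance to the next successor
      ToyBlock* n = m->succs[i];
      if (!n->visited) {
        n->visited = true;
        stk.push_back(std::make_pair(n, (size_t)0));
      }
    } else {
      post.push_back(m);                      // all successors done: post-order
      stk.pop_back();
    }
  }
}
// Reverse post-order: iterate 'post' from back to front.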

4538 
4539 
4540 //=============================================================================
4541 //------------------------------LoopTreeIterator-----------------------------------
4542 
4543 // Advance to next loop tree using a preorder, left-to-right traversal.
4544 void LoopTreeIterator::next() {
4545   assert(!done(), "must not be done.");
4546   if (_curnt->_child != NULL) {
4547     _curnt = _curnt->_child;
4548   } else if (_curnt->_next != NULL) {
4549     _curnt = _curnt->_next;
4550   } else {
4551     while (_curnt != _root && _curnt->_next == NULL) {
4552       _curnt = _curnt->_parent;
4553     }
4554     if (_curnt == _root) {
4555       _curnt = NULL;
4556       assert(done(), "must be done.");
4557     } else {