src/share/vm/opto/gcm.cpp
*** old/src/share/vm/opto/gcm.cpp	Tue Aug  3 11:01:46 2010
--- new/src/share/vm/opto/gcm.cpp	Tue Aug  3 11:01:46 2010

*** 839,849 **** --- 839,849 ----
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
!              n->_idx, _node_latency.at_grow(n->_idx));
!              n->_idx, _node_latency->at_grow(n->_idx));
    dump();
  }
#endif
  if (n->is_Proj())
*** 851,861 **** --- 851,861 ----
  if (n->is_Root())
    return;

  uint nlen = n->len();
! uint use_latency = _node_latency.at_grow(n->_idx);
! uint use_latency = _node_latency->at_grow(n->_idx);
  uint use_pre_order = _bbs[n->_idx]->_pre_order;

  for ( uint j=0; j<nlen; j++ ) {
    Node *def = n->in(j);
*** 882,900 **** --- 882,900 ----
      continue;

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

!   if (_node_latency.at_grow(def->_idx) < current_latency) {
!     _node_latency.at_put_grow(def->_idx, current_latency);
!   if (_node_latency->at_grow(def->_idx) < current_latency) {
!     _node_latency->at_put_grow(def->_idx, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
                    use_latency, j, delta_latency, current_latency, def->_idx,
!                   _node_latency.at_grow(def->_idx));
!                   _node_latency->at_grow(def->_idx));
    }
#endif
  }
}
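The hunks above only change member access ('.') to pointer dereference ('->') on the latency table; the at_grow / at_put_grow pattern itself is unchanged: out-of-range reads and writes grow the table on demand, and a def's latency is only ever raised to the maximum of (use latency + edge latency) over its uses. A minimal sketch of that access pattern, using a std::vector-backed stand-in rather than HotSpot's GrowableArray (LatencyTable and update_def_latency are illustrative names, not VM code):

    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for the auto-growing latency table: out-of-range
    // reads and writes extend the backing store, filling new slots with 0.
    struct LatencyTable {
      std::vector<uint32_t> data;

      uint32_t at_grow(size_t idx) {                 // cf. _node_latency->at_grow(idx)
        if (idx >= data.size()) data.resize(idx + 1, 0);
        return data[idx];
      }
      void at_put_grow(size_t idx, uint32_t val) {   // cf. _node_latency->at_put_grow(idx, val)
        if (idx >= data.size()) data.resize(idx + 1, 0);
        data[idx] = val;
      }
    };

    // The def-side update in partial_latency_of_defs is a max-accumulation:
    // a def's latency is the maximum over its uses of use latency plus edge latency.
    void update_def_latency(LatencyTable& lat, size_t def_idx, uint32_t current_latency) {
      if (lat.at_grow(def_idx) < current_latency) {
        lat.at_put_grow(def_idx, current_latency);
      }
    }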
*** 924,934 **** --- 924,934 ----
  if (use_pre_order == def_pre_order && use->is_Phi())
    return 0;

  uint nlen = use->len();
! uint nl = _node_latency.at_grow(use->_idx);
! uint nl = _node_latency->at_grow(use->_idx);

  for ( uint j=0; j<nlen; j++ ) {
    if (use->in(j) == n) {
      // Change this if we want local latencies
      uint ul = use->latency(j);
*** 960,970 **** --- 960,970 ----
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
!              n->_idx, _node_latency.at_grow(n->_idx));
!              n->_idx, _node_latency->at_grow(n->_idx));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;
*** 973,995 **** --- 973,995 ----
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

! _node_latency.at_put_grow(n->_idx, latency);
! _node_latency->at_put_grow(n->_idx, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least = LCA;
  double least_freq = least->_freq;
! uint target = _node_latency.at_grow(self->_idx);
! uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
! uint end_latency = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
! uint target = _node_latency->at_grow(self->_idx);
! uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
! uint end_latency = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  bool in_latency = (target <= start_latency);
  const Block* root_block = _bbs[_root->_idx];

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
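latency_from_uses above folds a node's latency as the maximum value contributed by any of its uses before storing it back through the now pointer-typed table. A standalone sketch of that fold (fold_latency_from_uses and use_latencies are illustrative; the real code iterates fast_outs and calls latency_from_use):

    #include <cstdint>
    #include <vector>

    // Backwards-walk step: a node's latency is the maximum latency any of its
    // uses imposes on it; the result is then written back with at_put_grow.
    uint32_t fold_latency_from_uses(const std::vector<uint32_t>& use_latencies) {
      uint32_t latency = 0;
      for (uint32_t l : use_latencies) {
        if (latency < l) latency = l;   // same max-accumulation as the VM loop
      }
      return latency;
    }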
*** 1003,1013 **** --- 1003,1013 ----
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ",
!              _node_latency.at_grow(self->_idx));
!              _node_latency->at_grow(self->_idx));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order, LCA->_nodes[0]->_idx, start_latency,
*** 1030,1042 **** --- 1030,1042 ----
    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

!   uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
!   uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
    uint end_idx = LCA->end_idx();
!   uint end_lat = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
!   uint end_lat = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
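For context, the start/end latencies and block frequency read here feed the candidate test in the unchanged code between these hunks. Roughly paraphrased (BlockInfo and prefer_candidate are stand-ins, and the VM adds further guards): a dominating block replaces the current choice when it is outright cheaper, or when the node already misses its latency window and the candidate is not much more expensive yet still ends early enough for the target latency.

    #include <cstdint>

    // Hypothetical plain-data stand-in for the fields hoist_to_cheaper_block
    // reads from a candidate block in the dominator chain between LCA and early.
    struct BlockInfo {
      double   freq;      // cf. Block::_freq
      uint32_t end_lat;   // latency of the block's last node
    };

    bool prefer_candidate(const BlockInfo& cand, double least_freq,
                          bool in_latency, uint32_t target, double delta) {
      return cand.freq < least_freq ||            // strictly better frequency
             (!in_latency &&                      // latency target already missed
              cand.freq < least_freq * delta &&   // and frequency not much worse
              target >= cand.end_lat);            // and still inside the latency range
    }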
*** 1071,1081 **** --- 1071,1081 ----
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
!   _node_latency.at_put_grow(self->_idx, end_latency);
!   _node_latency->at_put_grow(self->_idx, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}
*** 1253,1264 **** --- 1253,1263 ----
  proj_list.push(_root);        // Add real root as another root
  proj_list.pop();

  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
- GrowableArray<uint> node_latency;
- _node_latency = node_latency;
+ _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if( C->do_scheduling() )
    ComputeLatenciesBackwards(visited, stack);

  // Now schedule all codes as LATE as possible. This is the LCA in the
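This hunk is the substantive change: instead of copy-assigning a function-local GrowableArray<uint> into the _node_latency member, the member becomes a pointer and the table is allocated with new in the compiler's resource area for the duration of global code motion; every other hunk in this file is the matching '.'-to-'->' update at the call sites. A minimal lifetime sketch, with a function-scoped std::vector standing in for the resource-area allocation (PhaseCFGSketch and its methods are illustrative only):

    #include <cstdint>
    #include <vector>

    // The phase object holds only a pointer; the table it points at is created
    // when global code motion starts and is valid exactly as long as the
    // phase's backing storage (here: the local's scope; in the VM: the
    // resource area).
    struct PhaseCFGSketch {
      std::vector<uint32_t>* _node_latency = nullptr;

      void global_code_motion() {
        std::vector<uint32_t> node_latency;   // stands in for new GrowableArray<uint>()
        _node_latency = &node_latency;

        compute_latencies_backwards();        // every user dereferences the pointer
        schedule_late();
      }                                       // table storage ends with the phase

      void compute_latencies_backwards() { _node_latency->resize(8, 0); }
      void schedule_late()               { _node_latency->at(3) = 7; }
    };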
*** 1339,1348 **** --- 1338,1349 ----
    for (uint i = 0; i < _num_blocks; i++) {
      _blocks[i]->dump();
    }
  }
#endif

+ // Dead.
+ _node_latency = (GrowableArray<uint> *)0xdeadbeef;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
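The trailing assignment added here is a poison-pointer idiom: once GlobalCodeMotion returns, the resource-area storage behind the table is no longer valid, so the pointer is overwritten with a recognizable bad address rather than left dangling. Any later dereference then faults on an obviously bogus value instead of quietly reading recycled memory. A tiny illustration of the same idiom outside HotSpot (Table and PhaseState are purely demonstrative):

    #include <cstdint>

    struct Table;        // opaque; only the pointer's value matters here

    struct PhaseState {
      Table* latency_table;

      void finish_phase() {
        // The storage owning the table is about to be released. A poison
        // constant such as 0xdeadbeef makes an accidental later use stand out
        // in a crash dump, where a stale-but-plausible pointer might not.
        latency_table = reinterpret_cast<Table*>(0xdeadbeef);
      }
    };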
