
src/hotspot/share/opto/compile.cpp

 388   for (int i = C->macro_count()-1; i >= 0; i--) {
 389     Node* n = C->macro_node(i);
 390     if (!useful.member(n)) {
 391       remove_macro_node(n);
 392     }
 393   }
 394   // Remove useless CastII nodes with range check dependency
 395   for (int i = range_check_cast_count() - 1; i >= 0; i--) {
 396     Node* cast = range_check_cast_node(i);
 397     if (!useful.member(cast)) {
 398       remove_range_check_cast(cast);
 399     }
 400   }
 401   // Remove useless expensive nodes
 402   for (int i = C->expensive_count()-1; i >= 0; i--) {
 403     Node* n = C->expensive_node(i);
 404     if (!useful.member(n)) {
 405       remove_expensive_node(n);
 406     }
 407   }
 408   for (int i = value_type_ptr_count() - 1; i >= 0; i--) {
 409     ValueTypePtrNode* vtptr = value_type_ptr(i);
 410     if (!useful.member(vtptr)) {
 411       remove_value_type_ptr(vtptr);
 412     }
 413   }
 414   // clean up the late inline lists
 415   remove_useless_late_inlines(&_string_late_inlines, useful);
 416   remove_useless_late_inlines(&_boxing_late_inlines, useful);
 417   remove_useless_late_inlines(&_late_inlines, useful);
 418   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 419 }
 420 
 421 //------------------------------frame_size_in_words-----------------------------
 422 // frame_slots in units of words
 423 int Compile::frame_size_in_words() const {
 424   // shift is 0 in LP32 and 1 in LP64
 425   const int shift = (LogBytesPerWord - LogBytesPerInt);
 426   int words = _frame_slots >> shift;
 427   assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
 428   return words;
 429 }
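A quick worked example of the slots-to-words conversion above, using illustrative values (on LP64, LogBytesPerWord == 3 and LogBytesPerInt == 2, so shift == 1 and two 32-bit slots form one 64-bit word):

    #include <cassert>

    int main() {
      const int shift = 3 - 2;            // LogBytesPerWord - LogBytesPerInt on LP64
      int frame_slots = 14;               // hypothetical frame size in 32-bit slots
      int words = frame_slots >> shift;   // 14 slots -> 7 words
      assert((words << shift) == frame_slots && "an odd slot count would trip the assert");
      return words;                       // 7
    }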
 430 
 431 // To bang the stack of this compiled method we use the stack size
 432 // that the interpreter would need in case of a deoptimization. This


1168   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1169   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1170   {
1171     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1172   }
1173   // Initialize the first few types.
1174   _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1175   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1176   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1177   _num_alias_types = AliasIdxRaw+1;
1178   // Zero out the alias type cache.
1179   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1180   // A NULL adr_type hits in the cache right away.  Preload the right answer.
1181   probe_alias_cache(NULL)->_index = AliasIdxTop;
1182 
1183   _intrinsics = NULL;
1184   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1185   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1186   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1187   _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1188   _value_type_ptr_nodes = new(comp_arena()) GrowableArray<ValueTypePtrNode*>(comp_arena(), 8,  0, NULL);
1189   register_library_intrinsics();
1190 }
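probe_alias_cache itself is outside this hunk; as a hedged sketch (types, field names, and sizes assumed here, not taken from the VM), a direct-mapped cache like the one preloaded above can be probed by folding the adr_type pointer into a power-of-two table index:

    #include <cstdint>

    struct AliasCacheEntry { const void* _adr_type; int _index; };
    enum { logAliasCacheSize = 6, AliasCacheSize = 1 << logAliasCacheSize };
    static AliasCacheEntry _alias_cache[AliasCacheSize];

    static AliasCacheEntry* probe_alias_cache(const void* adr_type) {
      intptr_t key = (intptr_t)adr_type;
      key ^= key >> logAliasCacheSize;                // mix high bits into the slot index
      return &_alias_cache[key & (AliasCacheSize - 1)];
    }

In this sketch a NULL key hashes to slot 0, which is why a preload like the one above makes a NULL adr_type hit in the cache right away.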
1191 
1192 //---------------------------init_start----------------------------------------
1193 // Install the StartNode on this compile object.
1194 void Compile::init_start(StartNode* s) {
1195   if (failing())
1196     return; // already failing
1197   assert(s == start(), "");
1198 }
1199 
1200 /**
1201  * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1202  * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1203  * the ideal graph.
1204  */
1205 StartNode* Compile::start() const {
1206   assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
1207   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1208     Node* start = root()->fast_out(i);


1954   }
1955   assert(predicate_count()==0, "should be clean!");
1956 }
1957 
1958 void Compile::add_range_check_cast(Node* n) {
1959   assert(n->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1960   assert(!_range_check_casts->contains(n), "duplicate entry in range check casts");
1961   _range_check_casts->append(n);
1962 }
1963 
1964 // Remove all range check dependent CastIINodes.
1965 void Compile::remove_range_check_casts(PhaseIterGVN &igvn) {
1966   for (int i = range_check_cast_count(); i > 0; i--) {
1967     Node* cast = range_check_cast_node(i-1);
1968     assert(cast->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1969     igvn.replace_node(cast, cast->in(1));
1970   }
1971   assert(range_check_cast_count() == 0, "should be empty");
1972 }
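To make the replacement concrete, a hedged before/after with invented names: a range-check-dependent CastII narrows an index to the interval its RangeCheck proved; once the loop optimizations that rely on that pinning are done, each cast collapses to its input and users see the raw index again:

    // before: idx_cast = CastII(idx) with range check dependency, type int:[0, len-1]
    //           mem = StoreI(base, idx_cast, val)
    // after igvn.replace_node(idx_cast, idx_cast->in(1)):
    //           mem = StoreI(base, idx, val)    // all uses rewired to idx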
1973 
1974 void Compile::add_value_type_ptr(ValueTypePtrNode* n) {
1975   assert(can_add_value_type_ptr(), "too late");
1976   assert(!_value_type_ptr_nodes->contains(n), "duplicate entry");
1977   _value_type_ptr_nodes->append(n);
1978 }
1979 
1980 void Compile::process_value_type_ptr_nodes(PhaseIterGVN &igvn) {
1981   for (int i = value_type_ptr_count(); i > 0; i--) {
1982     ValueTypePtrNode* vtptr = value_type_ptr(i-1);
1983     // Scalarize once all inlining is over, otherwise debug info can get
1984     // inconsistent
1985     vtptr->make_scalar_in_safepoints(igvn.C->root(), &igvn);
1986     igvn.replace_node(vtptr, vtptr->get_oop());
1987   }
1988   assert(value_type_ptr_count() == 0, "should be empty");
1989   _value_type_ptr_nodes = NULL;
1990   igvn.optimize();
1991 }
1992 
1993 // StringOpts and late inlining of string methods
1994 void Compile::inline_string_calls(bool parse_time) {
1995   {
1996     // remove useless nodes to make the usage analysis simpler
1997     ResourceMark rm;
1998     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1999   }
2000 
2001   {
2002     ResourceMark rm;
2003     print_method(PHASE_BEFORE_STRINGOPTS, 3);
2004     PhaseStringOpts pso(initial_gvn(), for_igvn());
2005     print_method(PHASE_AFTER_STRINGOPTS, 3);
2006   }
2007 
2008   // now inline anything that we skipped the first time around
2009   if (!parse_time) {


2220   remove_speculative_types(igvn);
2221 
2222   // No more new expensive nodes will be added to the list from here on,
2223   // so keep only the actual candidates for optimizations.
2224   cleanup_expensive_nodes(igvn);
2225 
2226   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2227     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2228     initial_gvn()->replace_with(&igvn);
2229     for_igvn()->clear();
2230     Unique_Node_List new_worklist(C->comp_arena());
2231     {
2232       ResourceMark rm;
2233       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2234     }
2235     set_for_igvn(&new_worklist);
2236     igvn = PhaseIterGVN(initial_gvn());
2237     igvn.optimize();
2238   }
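The guard on this block is a simple density test; with illustrative numbers, and assuming NodeLimitFudgeFactor's default of 2000:

    // live_nodes() == 4000, unique() == 10000:
    //   4000 + 2000 < 10000  -> renumber: live node _idx values are compacted,
    //   so later VectorSets and node-indexed tables shrink accordingly.
    // live_nodes() == 9000, unique() == 10000:
    //   9000 + 2000 < 10000 is false -> skip: compaction would not pay off.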
2239 
2240   if (value_type_ptr_count() > 0) {
2241     process_value_type_ptr_nodes(igvn);
2242   }
2243 
2244   // Perform escape analysis
2245   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2246     if (has_loops()) {
2247       // Cleanup graph (remove dead nodes).
2248       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2249       PhaseIdealLoop ideal_loop( igvn, false, true );
2250       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2251       if (failing())  return;
2252     }
2253     ConnectionGraph::do_analysis(this, &igvn);
2254 
2255     if (failing())  return;
2256 
2257     // Optimize out field loads from scalar replaceable allocations.
2258     igvn.optimize();
2259     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2260 
2261     if (failing())  return;


2368       return;
2369     }
2370   }
2371 
2372   DEBUG_ONLY( _modified_nodes = NULL; )
2373  } // (End scope of igvn; run destructor if necessary for asserts.)
2374 
2375  process_print_inlining();
2376  // A method with only infinite loops has no edges entering loops from root
2377  {
2378    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2379    if (final_graph_reshaping()) {
2380      assert(failing(), "must bail out w/ explicit message");
2381      return;
2382    }
2383  }
2384 
2385  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2386 }
2387 
2388 // FIXME remove
2389 static void check_for_value_node(Node &n, void* C) {
2390   if (n.is_ValueType()) {
2391 #ifdef ASSERT
2392     ((Compile*)C)->method()->print_short_name();
2393     tty->print_cr("");
2394     n.dump(-1);
2395     assert(false, "Unable to match ValueTypeNode");
2396 #endif
2397     ((Compile*)C)->record_failure("Unable to match ValueTypeNode");
2398   }
2399 }
2400 
2401 //------------------------------Code_Gen---------------------------------------
2402 // Given a graph, generate code for it
2403 void Compile::Code_Gen() {
2404   // FIXME remove
2405   root()->walk(Node::nop, check_for_value_node, this);
2406 
2407   if (failing()) {
2408     return;
2409   }
2410 
2411   // Perform instruction selection.  You might think we could reclaim Matcher
2412   // memory PDQ, but actually the Matcher is used in generating spill code.
2413   // Internals of the Matcher (including some VectorSets) must remain live
2414   // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
2415   // set a bit in reclaimed memory.
2416 
2417   // In debug mode you can dump m._nodes.dump() for mapping of ideal to machine
2418   // nodes.  Mapping is only valid at the root of each matched subtree.
2419   NOT_PRODUCT( verify_graph_edges(); )
2420 
2421   Matcher matcher;
2422   _matcher = &matcher;
2423   {
2424     TracePhase tp("matcher", &timers[_t_matcher]);
2425     matcher.match();
2426   }


3365           }
3366           // Push their uses so we get a chance to remove nodes made
3367           // redundant
3368           for (DUIterator_Fast imax, i = k->fast_outs(imax); i < imax; i++) {
3369             Node* u = k->fast_out(i);
3370             assert(!wq.contains(u), "shouldn't process one node several times");
3371             if (u->Opcode() == Op_LShiftL ||
3372                 u->Opcode() == Op_AddL ||
3373                 u->Opcode() == Op_SubL ||
3374                 u->Opcode() == Op_AddP) {
3375               wq.push(u);
3376             }
3377           }
3378           // Replace k, whose edges are identical to m's, with m
3379           k->subsume_by(m, this);
3380         }
3381       }
3382     }
3383     break;
3384   }
3385   case Op_ValueType: {
3386     ValueTypeNode* vt = n->as_ValueType();
3387     vt->make_scalar_in_safepoints(root(), NULL);
3388     if (vt->outcnt() == 0) {
3389       vt->disconnect_inputs(NULL, this);
3390     }
3391     break;
3392   }
3393   case Op_ValueTypePtr: {
3394     ShouldNotReachHere();
3395     break;
3396   }
3397   default:
3398     assert( !n->is_Call(), "" );
3399     assert( !n->is_Mem(), "" );
3400     assert( nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3401     break;
3402   }
3403 
3404   // Collect CFG split points
3405   if (n->is_MultiBranch() && !n->is_RangeCheck()) {
3406     frc._tests.push(n);
3407   }
3408 }
3409 
3410 //------------------------------final_graph_reshaping_walk---------------------
3411 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
3412 // requires that the walk visits a node's inputs before visiting the node.
3413 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3414   ResourceArea *area = Thread::current()->resource_area();
3415   Unique_Node_List sfpt(area);
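As a self-contained toy illustration of the inputs-before-node order required here (toy node type, acyclic toy graph, not the VM's Node; the real walk drives a Node_Stack of node/input-index pairs in much the same way):

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct ToyNode { std::vector<ToyNode*> in; bool visited; };

    // Post-order over an acyclic toy graph: every input is visited before the
    // node itself, mirroring what the comment above demands of the real walk.
    void walk_post_order(ToyNode* root, void (*visit)(ToyNode*)) {
      std::vector<std::pair<ToyNode*, size_t> > stack;
      root->visited = true;
      stack.push_back(std::make_pair(root, (size_t)0));
      while (!stack.empty()) {
        ToyNode* n = stack.back().first;
        size_t   i = stack.back().second;
        if (i < n->in.size()) {
          stack.back().second++;              // advance before descending
          ToyNode* m = n->in[i];
          if (m != NULL && !m->visited) {
            m->visited = true;
            stack.push_back(std::make_pair(m, (size_t)0));
          }
        } else {
          visit(n);                           // all inputs done; emit the node
          stack.pop_back();
        }
      }
    }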
3416 




 388   for (int i = C->macro_count()-1; i >= 0; i--) {
 389     Node* n = C->macro_node(i);
 390     if (!useful.member(n)) {
 391       remove_macro_node(n);
 392     }
 393   }
 394   // Remove useless CastII nodes with range check dependency
 395   for (int i = range_check_cast_count() - 1; i >= 0; i--) {
 396     Node* cast = range_check_cast_node(i);
 397     if (!useful.member(cast)) {
 398       remove_range_check_cast(cast);
 399     }
 400   }
 401   // Remove useless expensive nodes
 402   for (int i = C->expensive_count()-1; i >= 0; i--) {
 403     Node* n = C->expensive_node(i);
 404     if (!useful.member(n)) {
 405       remove_expensive_node(n);
 406     }
 407   }
 408   // Remove useless value type nodes
 409   if (_value_type_nodes != NULL) {
 410     _value_type_nodes->remove_useless_nodes(useful.member_set());
 411   }
 412   // clean up the late inline lists
 413   remove_useless_late_inlines(&_string_late_inlines, useful);
 414   remove_useless_late_inlines(&_boxing_late_inlines, useful);
 415   remove_useless_late_inlines(&_late_inlines, useful);
 416   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 417 }
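remove_useless_nodes is declared elsewhere; as a hedged sketch of the filtering it implies, using standard containers as stand-ins (std::vector for the node list, std::set for the useful member set, node identity reduced to an int index):

    #include <set>
    #include <vector>

    // Keep only entries present in 'useful', compacting the list in place.
    void remove_useless(std::vector<int>& nodes, const std::set<int>& useful) {
      size_t next = 0;
      for (size_t i = 0; i < nodes.size(); i++) {
        if (useful.count(nodes[i])) {
          nodes[next++] = nodes[i];   // keep: slide left over removed entries
        }
      }
      nodes.resize(next);             // drop the dead tail
    }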
 418 
 419 //------------------------------frame_size_in_words-----------------------------
 420 // frame_slots in units of words
 421 int Compile::frame_size_in_words() const {
 422   // shift is 0 in LP32 and 1 in LP64
 423   const int shift = (LogBytesPerWord - LogBytesPerInt);
 424   int words = _frame_slots >> shift;
 425   assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
 426   return words;
 427 }
 428 
 429 // To bang the stack of this compiled method we use the stack size
 430 // that the interpreter would need in case of a deoptimization. This


1166   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1167   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1168   {
1169     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1170   }
1171   // Initialize the first few types.
1172   _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1173   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1174   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1175   _num_alias_types = AliasIdxRaw+1;
1176   // Zero out the alias type cache.
1177   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1178   // A NULL adr_type hits in the cache right away.  Preload the right answer.
1179   probe_alias_cache(NULL)->_index = AliasIdxTop;
1180 
1181   _intrinsics = NULL;
1182   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1183   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1184   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1185   _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1186   _value_type_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
1187   register_library_intrinsics();
1188 }
1189 
1190 //---------------------------init_start----------------------------------------
1191 // Install the StartNode on this compile object.
1192 void Compile::init_start(StartNode* s) {
1193   if (failing())
1194     return; // already failing
1195   assert(s == start(), "");
1196 }
1197 
1198 /**
1199  * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1200  * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1201  * the ideal graph.
1202  */
1203 StartNode* Compile::start() const {
1204   assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
1205   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1206     Node* start = root()->fast_out(i);


1952   }
1953   assert(predicate_count()==0, "should be clean!");
1954 }
1955 
1956 void Compile::add_range_check_cast(Node* n) {
1957   assert(n->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1958   assert(!_range_check_casts->contains(n), "duplicate entry in range check casts");
1959   _range_check_casts->append(n);
1960 }
1961 
1962 // Remove all range check dependent CastIINodes.
1963 void Compile::remove_range_check_casts(PhaseIterGVN &igvn) {
1964   for (int i = range_check_cast_count(); i > 0; i--) {
1965     Node* cast = range_check_cast_node(i-1);
1966     assert(cast->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1967     igvn.replace_node(cast, cast->in(1));
1968   }
1969   assert(range_check_cast_count() == 0, "should be empty");
1970 }
1971 
1972 void Compile::add_value_type(Node* n) {
1973   assert(n->is_ValueTypeBase(), "unexpected node");
1974   if (_value_type_nodes != NULL) {
1975     _value_type_nodes->push(n);
1976   }
1977 }
1978 
1979 void Compile::remove_value_type(Node* n) {
1980   assert(n->is_ValueTypeBase(), "unexpected node");
1981   if (_value_type_nodes != NULL) {
1982     _value_type_nodes->remove(n);
1983   }
1984 }
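A hedged usage note (call sites assumed, not part of this hunk): node creation and dead-node cleanup elsewhere keep this worklist in sync, and the NULL checks make both helpers safe no-ops once process_value_types() has retired the list:

    // on creation:    C->add_value_type(vt);     // register for later scalarization
    // on elimination: C->remove_value_type(vt);  // e.g. when IGVN removes the node
    // after process_value_types() sets _value_type_nodes to NULL, both are no-ops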
1985 
1986 void Compile::process_value_types(PhaseIterGVN &igvn) {
1987   // Make value types scalar in safepoints
1988   while (_value_type_nodes->size() != 0) {
1989     ValueTypeBaseNode* vt = _value_type_nodes->pop()->as_ValueTypeBase();
1990     vt->make_scalar_in_safepoints(igvn.C->root(), &igvn);
1991     if (vt->is_ValueTypePtr()) {
1992       igvn.replace_node(vt, vt->get_oop());
1993     }
1994   }
1995   _value_type_nodes = NULL;
1996   igvn.optimize();
1997 }
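What the scalarization loop above accomplishes, as a hedged before/after sketch of a safepoint's debug info (IR rendered informally):

    // before: the safepoint carries the value type as one aggregate node
    //   SafePoint(..., vt)            where vt = ValueType(f1, f2)
    // after vt->make_scalar_in_safepoints(): the fields appear directly, so
    // deoptimization can rematerialize the value without a heap allocation
    //   SafePoint(..., f1, f2)
    // ValueTypePtr nodes are additionally replaced by their buffered oop:
    //   igvn.replace_node(vt, vt->get_oop());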
1998 
1999 // StringOpts and late inlining of string methods
2000 void Compile::inline_string_calls(bool parse_time) {
2001   {
2002     // remove useless nodes to make the usage analysis simpler
2003     ResourceMark rm;
2004     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2005   }
2006 
2007   {
2008     ResourceMark rm;
2009     print_method(PHASE_BEFORE_STRINGOPTS, 3);
2010     PhaseStringOpts pso(initial_gvn(), for_igvn());
2011     print_method(PHASE_AFTER_STRINGOPTS, 3);
2012   }
2013 
2014   // now inline anything that we skipped the first time around
2015   if (!parse_time) {


2226   remove_speculative_types(igvn);
2227 
2228   // No more new expensive nodes will be added to the list from here on,
2229   // so keep only the actual candidates for optimizations.
2230   cleanup_expensive_nodes(igvn);
2231 
2232   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2233     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2234     initial_gvn()->replace_with(&igvn);
2235     for_igvn()->clear();
2236     Unique_Node_List new_worklist(C->comp_arena());
2237     {
2238       ResourceMark rm;
2239       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2240     }
2241     set_for_igvn(&new_worklist);
2242     igvn = PhaseIterGVN(initial_gvn());
2243     igvn.optimize();
2244   }
2245 
2246   if (_value_type_nodes->size() > 0) {
2247     // Do this once all inlining is over to avoid getting inconsistent debug info
2248     process_value_types(igvn);
2249   }
2250 
2251   // Perform escape analysis
2252   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2253     if (has_loops()) {
2254       // Cleanup graph (remove dead nodes).
2255       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2256       PhaseIdealLoop ideal_loop( igvn, false, true );
2257       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2258       if (failing())  return;
2259     }
2260     ConnectionGraph::do_analysis(this, &igvn);
2261 
2262     if (failing())  return;
2263 
2264     // Optimize out field loads from scalar replaceable allocations.
2265     igvn.optimize();
2266     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2267 
2268     if (failing())  return;


2375       return;
2376     }
2377   }
2378 
2379   DEBUG_ONLY( _modified_nodes = NULL; )
2380  } // (End scope of igvn; run destructor if necessary for asserts.)
2381 
2382  process_print_inlining();
2383  // A method with only infinite loops has no edges entering loops from root
2384  {
2385    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2386    if (final_graph_reshaping()) {
2387      assert(failing(), "must bail out w/ explicit message");
2388      return;
2389    }
2390  }
2391 
2392  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2393 }
2394 
2395 //------------------------------Code_Gen---------------------------------------
2396 // Given a graph, generate code for it
2397 void Compile::Code_Gen() {
2398   if (failing()) {
2399     return;
2400   }
2401 
2402   // Perform instruction selection.  You might think we could reclaim Matcher
2403   // memory PDQ, but actually the Matcher is used in generating spill code.
2404   // Internals of the Matcher (including some VectorSets) must remain live
2405   // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
2406   // set a bit in reclaimed memory.
2407 
2408   // In debug mode you can dump m._nodes.dump() for mapping of ideal to machine
2409   // nodes.  Mapping is only valid at the root of each matched subtree.
2410   NOT_PRODUCT( verify_graph_edges(); )
2411 
2412   Matcher matcher;
2413   _matcher = &matcher;
2414   {
2415     TracePhase tp("matcher", &timers[_t_matcher]);
2416     matcher.match();
2417   }


3356           }
3357           // Push their uses so we get a chance to remove nodes made
3358           // redundant
3359           for (DUIterator_Fast imax, i = k->fast_outs(imax); i < imax; i++) {
3360             Node* u = k->fast_out(i);
3361             assert(!wq.contains(u), "shouldn't process one node several times");
3362             if (u->Opcode() == Op_LShiftL ||
3363                 u->Opcode() == Op_AddL ||
3364                 u->Opcode() == Op_SubL ||
3365                 u->Opcode() == Op_AddP) {
3366               wq.push(u);
3367             }
3368           }
3369           // Replace k, whose edges are identical to m's, with m
3370           k->subsume_by(m, this);
3371         }
3372       }
3373     }
3374     break;
3375   }
3376 #ifdef ASSERT
3377   case Op_ValueTypePtr:
3378   case Op_ValueType: {
3379     n->dump(-1);
3380     assert(false, "value type node was not removed");
3381     break;
3382   }
3383 #endif
3384   default:
3385     assert( !n->is_Call(), "" );
3386     assert( !n->is_Mem(), "" );
3387     assert( nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3388     break;
3389   }
3390 
3391   // Collect CFG split points
3392   if (n->is_MultiBranch() && !n->is_RangeCheck()) {
3393     frc._tests.push(n);
3394   }
3395 }
3396 
3397 //------------------------------final_graph_reshaping_walk---------------------
3398 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
3399 // requires that the walk visits a node's inputs before visiting the node.
3400 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3401   ResourceArea *area = Thread::current()->resource_area();
3402   Unique_Node_List sfpt(area);
3403 

