
src/hotspot/share/opto/compile.hpp

 405   bool                  _do_scheduling;         // True if we intend to do scheduling
 406   bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
 407   bool                  _do_count_invocations;  // True if we generate code to count invocations
 408   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
 409   bool                  _do_vector_loop;        // True if allowed to execute loop in parallel iterations
 410   bool                  _use_cmove;             // True if CMove should be used without profitability analysis
 411   bool                  _age_code;              // True if we need to profile code age (decrement the aging counter)
 412   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
 413   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
 414   bool                  _print_inlining;        // True if we should print inlining for this compilation
 415   bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
 416 #ifndef PRODUCT
 417   bool                  _trace_opto_output;
 418   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 419 #endif
 420   bool                  _has_irreducible_loop;  // Found irreducible loops
 421   // JSR 292
 422   bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
 423   RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
 424   int                   _loop_opts_cnt;         // loop opts round


 425 
 426   // Compilation environment.
 427   Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
 428   void*                 _barrier_set_state;     // Potential GC barrier state for Compile
 429   ciEnv*                _env;                   // CI interface
 430   DirectiveSet*         _directive;             // Compiler directive
 431   CompileLog*           _log;                   // from CompilerThread
 432   const char*           _failure_reason;        // for record_failure/failing pattern
 433   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
 434   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
 435   GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
 436   GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
 437   GrowableArray<Node*>* _range_check_casts;     // List of CastII nodes with a range check dependency
 438   GrowableArray<Node*>* _opaque4_nodes;         // List of Opaque4 nodes that have a default value
 439   Unique_Node_List*     _value_type_nodes;      // List of ValueType nodes
 440   ConnectionGraph*      _congraph;
 441 #ifndef PRODUCT
 442   IdealGraphPrinter*    _printer;
 443 #endif
 444 


 704   void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
 705   bool              do_vector_loop() const      { return _do_vector_loop; }
 706   void          set_do_vector_loop(bool z)      { _do_vector_loop = z; }
 707   bool              use_cmove() const           { return _use_cmove; }
 708   void          set_use_cmove(bool z)           { _use_cmove = z; }
 709   bool              age_code() const             { return _age_code; }
 710   void          set_age_code(bool z)             { _age_code = z; }
 711   int               AliasLevel() const           { return _AliasLevel; }
 712   bool              print_assembly() const       { return _print_assembly; }
 713   void          set_print_assembly(bool z)       { _print_assembly = z; }
 714   bool              print_inlining() const       { return _print_inlining; }
 715   void          set_print_inlining(bool z)       { _print_inlining = z; }
 716   bool              print_intrinsics() const     { return _print_intrinsics; }
 717   void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
 718   RTMState          rtm_state()  const           { return _rtm_state; }
 719   void          set_rtm_state(RTMState s)        { _rtm_state = s; }
 720   bool              use_rtm() const              { return (_rtm_state & NoRTM) == 0; }
 721   bool          profile_rtm() const              { return _rtm_state == ProfileRTM; }
 722   uint              max_node_limit() const       { return (uint)_max_node_limit; }
 723   void          set_max_node_limit(uint n)       { _max_node_limit = n; }
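
Editor's note on the use_rtm()/profile_rtm() tests above: RTMState is a small bit set defined elsewhere in HotSpot, so use_rtm() is true whenever the NoRTM bit is clear. A minimal sketch of the intended behavior, assuming the usual HotSpot encoding of RTMState (the enum values below are quoted from memory and should be checked against the tree):

    // Sketch only -- assumed RTMState encoding:
    //   NoRTM      = 0x2   (don't use RTM)
    //   UseRTM     = 0x1   (use RTM, no abort-ratio profiling)
    //   ProfileRTM = 0x4   (use RTM while measuring the abort ratio)
    RTMState state = ProfileRTM;              // hypothetical value for illustration
    bool uses_rtm  = ((state & NoRTM) == 0);  // true: ProfileRTM still uses RTM
    bool profiling = (state == ProfileRTM);   // true while the abort ratio is measured
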



 724 
 725   // Support for scalarized value type calling convention
 726   bool              has_scalarized_args() const  { return _method != NULL && _method->has_scalarized_args(); }
 727   bool              needs_stack_repair()  const  { return _method != NULL && _method->get_Method()->c2_needs_stack_repair(); }
 728   int               sp_inc_offset()       const  { return _sp_inc_slot_offset_in_bytes; }
 729 
 730   // check the CompilerOracle for special behaviours for this compile
 731   bool          method_has_option(const char * option) {
 732     return method() != NULL && method()->has_option(option);
 733   }
 734 
 735 #ifndef PRODUCT
 736   bool          trace_opto_output() const       { return _trace_opto_output; }
 737   bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
 738   void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
 739   int _in_dump_cnt;  // Required for dumping ir nodes.
 740 #endif
 741   bool              has_irreducible_loop() const { return _has_irreducible_loop; }
 742   void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
 743 


 837   int   range_check_cast_count()       const { return _range_check_casts->length(); }
 838   // Remove all range check dependent CastIINodes.
 839   void  remove_range_check_casts(PhaseIterGVN &igvn);
 840 
 841   void add_opaque4_node(Node* n);
 842   void remove_opaque4_node(Node* n) {
 843     if (_opaque4_nodes->contains(n)) {
 844       _opaque4_nodes->remove(n);
 845     }
 846   }
 847   Node* opaque4_node(int idx) const { return _opaque4_nodes->at(idx);  }
 848   int   opaque4_count()       const { return _opaque4_nodes->length(); }
 849   void  remove_opaque4_nodes(PhaseIterGVN &igvn);
 850 
 851   // Keep track of value type nodes for later processing
 852   void add_value_type(Node* n);
 853   void remove_value_type(Node* n);
 854   void process_value_types(PhaseIterGVN &igvn);
 855   bool can_add_value_type() const { return _value_type_nodes != NULL; }
 856 


 857   // remove the opaque nodes that protect the predicates so that the unused checks and
 858   // uncommon traps will be eliminated from the graph.
 859   void cleanup_loop_predicates(PhaseIterGVN &igvn);
 860   bool is_predicate_opaq(Node * n) {
 861     return _predicate_opaqs->contains(n);
 862   }
 863 
 864   // Are there candidate expensive nodes for optimization?
 865   bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
 866   // Check whether n1 and n2 are similar
 867   static int cmp_expensive_nodes(Node* n1, Node* n2);
 868   // Sort expensive nodes to locate similar expensive nodes
 869   void sort_expensive_nodes();
 870 
 871   // Compilation environment.
 872   Arena*      comp_arena()           { return &_comp_arena; }
 873   ciEnv*      env() const            { return _env; }
 874   CompileLog* log() const            { return _log; }
 875   bool        failing() const        { return _env->failing() || _failure_reason != NULL; }
 876   const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }
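
Editor's note: the failing()/failure_reason() pair above is the caller-facing half of the record_failure/failing pattern named in the _failure_reason comment: a phase records a reason string, and callers poll failing() after any step that may bail out. A minimal caller-side sketch, assuming an in-scope PhaseIterGVN named igvn:

    // Sketch of the record_failure/failing bail-out protocol.
    Compile* C = Compile::current();
    igvn.optimize();                 // any step that may record a failure
    if (C->failing()) {
      return;                        // give up; failure_reason() says why
    }
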


 980   void*             type_hwm()                  { return _type_hwm; }
 981   size_t            type_last_size()            { return _type_last_size; }
 982   int               num_alias_types()           { return _num_alias_types; }
 983 
 984   void          init_type_arena()                       { _type_arena = &_Compile_types; }
 985   void          set_type_arena(Arena* a)                { _type_arena = a; }
 986   void          set_type_dict(Dict* d)                  { _type_dict = d; }
 987   void          set_type_hwm(void* p)                   { _type_hwm = p; }
 988   void          set_type_last_size(size_t sz)           { _type_last_size = sz; }
 989 
 990   const TypeFunc* last_tf(ciMethod* m) {
 991     return (m == _last_tf_m) ? _last_tf : NULL;
 992   }
 993   void set_last_tf(ciMethod* m, const TypeFunc* tf) {
 994     assert(m != NULL || tf == NULL, "");
 995     _last_tf_m = m;
 996     _last_tf = tf;
 997   }
 998 
 999   AliasType*        alias_type(int                idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
1000   AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
1001   bool         have_alias_type(const TypePtr* adr_type);
1002   AliasType*        alias_type(ciField*         field);
1003 
1004   int               get_alias_index(const TypePtr* at)  { return alias_type(at)->index(); }
1005   const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
1006   int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }
1007 
1008   // Building nodes
1009   void              rethrow_exceptions(JVMState* jvms);
1010   void              return_values(JVMState* jvms);
1011   JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);
1012 
1013   // Decide how to build a call.
1014   // The profile factor is a discount to apply to this site's interp. profile.
1015   CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
1016                                    JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
1017                                    bool allow_intrinsics = true, bool delayed_forbidden = false);
1018   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
1019     return should_delay_string_inlining(call_method, jvms) ||
1020            should_delay_boxing_inlining(call_method, jvms);
1021   }
1022   bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
1023   bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
1024 
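
Editor's note: last_tf()/set_last_tf() above form a one-entry cache keyed by the most recently queried ciMethod. A sketch of the check-then-fill usage, assuming TypeFunc::make(ciMethod*) as the slow path that builds the signature type:

    // One-entry cache: hits only when the same method is queried twice in a row.
    const TypeFunc* tf = C->last_tf(method);
    if (tf == NULL) {
      tf = TypeFunc::make(method);   // build from the method's signature
      C->set_last_tf(method, tf);    // remember for the next query
    }
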


1301   uint varargs_C_out_slots_killed() const;
1302 
1303   // Number of Stack Slots consumed by a synchronization entry
1304   int sync_stack_slots() const;
1305 
1306   // Compute the name of old_SP.  See <arch>.ad for frame layout.
1307   OptoReg::Name compute_old_SP();
1308 
1309  private:
1310   // Phase control:
1311   void Init(int aliaslevel);                     // Prepare for a single compilation
1312   int  Inline_Warm();                            // Find more inlining work.
1313   void Finish_Warm();                            // Give up on further inlines.
1314   void Optimize();                               // Given a graph, optimize it
1315   void Code_Gen();                               // Generate code from a graph
1316 
1317   // Management of the AliasType table.
1318   void grow_alias_types();
1319   AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
1320   const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
1321   AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);
1322 
1323   void verify_top(Node*) const PRODUCT_RETURN;
1324 
1325   // Intrinsic setup.
1326   void           register_library_intrinsics();                            // initializer
1327   CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor
1328   int            intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found);  // helper
1329   CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
1330   void           register_intrinsic(CallGenerator* cg);                    // update fn
1331 
1332 #ifndef PRODUCT
1333   static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
1334   static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
1335 #endif
1336   // Function calls made by the public function final_graph_reshaping.
1337   // No need to be made public as they are not called elsewhere.
1338   void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
1339   void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop);
1340   void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
1341   void eliminate_redundant_card_marks(Node* n);
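
Editor's note: the private phase-control methods above are driven in a fixed order from the Compile constructor. Roughly, Init() prepares per-compilation state, Optimize() runs the ideal-graph transformations, and Code_Gen() matches and emits code. A heavily simplified sketch of that driver sequence, with error polling via failing() between phases (parsing and many steps elided):

    // Sketch of the top-level phase order (simplified).
    Init(aliaslevel);                // set up per-compilation tables and flags
    if (failing())  return;
    // ... parse bytecodes into the ideal graph ...
    Optimize();                      // IGVN, loop opts, macro expansion, etc.
    if (failing())  return;
    Code_Gen();                      // matching, scheduling, register allocation, emission
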




 405   bool                  _do_scheduling;         // True if we intend to do scheduling
 406   bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
 407   bool                  _do_count_invocations;  // True if we generate code to count invocations
 408   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
 409   bool                  _do_vector_loop;        // True if allowed to execute loop in parallel iterations
 410   bool                  _use_cmove;             // True if CMove should be used without profitability analysis
 411   bool                  _age_code;              // True if we need to profile code age (decrement the aging counter)
 412   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
 413   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
 414   bool                  _print_inlining;        // True if we should print inlining for this compilation
 415   bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
 416 #ifndef PRODUCT
 417   bool                  _trace_opto_output;
 418   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 419 #endif
 420   bool                  _has_irreducible_loop;  // Found irreducible loops
 421   // JSR 292
 422   bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
 423   RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
 424   int                   _loop_opts_cnt;         // loop opts round
 425   bool                  _has_flattened_accesses; // Any known flattened array accesses?
 426   bool                  _flattened_accesses_share_alias; // Initially all flattened array accesses share a single slice
 427
 428   // Compilation environment.
 429   Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
 430   void*                 _barrier_set_state;     // Potential GC barrier state for Compile
 431   ciEnv*                _env;                   // CI interface
 432   DirectiveSet*         _directive;             // Compiler directive
 433   CompileLog*           _log;                   // from CompilerThread
 434   const char*           _failure_reason;        // for record_failure/failing pattern
 435   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
 436   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
 437   GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
 438   GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
 439   GrowableArray<Node*>* _range_check_casts;     // List of CastII nodes with a range check dependency
 440   GrowableArray<Node*>* _opaque4_nodes;         // List of Opaque4 nodes that have a default value
 441   Unique_Node_List*     _value_type_nodes;      // List of ValueType nodes
 442   ConnectionGraph*      _congraph;
 443 #ifndef PRODUCT
 444   IdealGraphPrinter*    _printer;
 445 #endif
 446 
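
Editor's note on the two fields added at lines 425-426 of the patched version: they track whether the method contains any flattened value-array accesses and whether those accesses still share a single memory slice. A conceptual sketch of how they are expected to interact; the actual call sites live in the .cpp files and are an assumption here:

    // Conceptual sketch only -- not the actual call sites.
    // During parsing, any flattened array load/store marks the compilation:
    //   C->set_flattened_accesses();
    // All such accesses initially go through one shared alias slice, so once
    // the graph is stable the slice can be split into per-field slices:
    //   if (C->flattened_accesses_share_alias()) {
    //     C->adjust_flattened_array_access_aliases(igvn);  // declared below at line 862
    //   }
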


 706   void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
 707   bool              do_vector_loop() const      { return _do_vector_loop; }
 708   void          set_do_vector_loop(bool z)      { _do_vector_loop = z; }
 709   bool              use_cmove() const           { return _use_cmove; }
 710   void          set_use_cmove(bool z)           { _use_cmove = z; }
 711   bool              age_code() const             { return _age_code; }
 712   void          set_age_code(bool z)             { _age_code = z; }
 713   int               AliasLevel() const           { return _AliasLevel; }
 714   bool              print_assembly() const       { return _print_assembly; }
 715   void          set_print_assembly(bool z)       { _print_assembly = z; }
 716   bool              print_inlining() const       { return _print_inlining; }
 717   void          set_print_inlining(bool z)       { _print_inlining = z; }
 718   bool              print_intrinsics() const     { return _print_intrinsics; }
 719   void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
 720   RTMState          rtm_state()  const           { return _rtm_state; }
 721   void          set_rtm_state(RTMState s)        { _rtm_state = s; }
 722   bool              use_rtm() const              { return (_rtm_state & NoRTM) == 0; }
 723   bool          profile_rtm() const              { return _rtm_state == ProfileRTM; }
 724   uint              max_node_limit() const       { return (uint)_max_node_limit; }
 725   void          set_max_node_limit(uint n)       { _max_node_limit = n; }
 726   void          set_flattened_accesses()         { _has_flattened_accesses = true; }
 727   bool          flattened_accesses_share_alias() const { return _flattened_accesses_share_alias; }
 728   void          set_flattened_accesses_share_alias(bool z) { _flattened_accesses_share_alias = z; }
 729 
 730   // Support for scalarized value type calling convention
 731   bool              has_scalarized_args() const  { return _method != NULL && _method->has_scalarized_args(); }
 732   bool              needs_stack_repair()  const  { return _method != NULL && _method->get_Method()->c2_needs_stack_repair(); }
 733   int               sp_inc_offset()       const  { return _sp_inc_slot_offset_in_bytes; }
 734 
 735   // check the CompilerOracle for special behaviours for this compile
 736   bool          method_has_option(const char * option) {
 737     return method() != NULL && method()->has_option(option);
 738   }
 739 
 740 #ifndef PRODUCT
 741   bool          trace_opto_output() const       { return _trace_opto_output; }
 742   bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
 743   void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
 744   int _in_dump_cnt;  // Required for dumping ir nodes.
 745 #endif
 746   bool              has_irreducible_loop() const { return _has_irreducible_loop; }
 747   void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
 748 
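
Editor's note: method_has_option() above is the per-compilation hook into the CompilerOracle; it returns false when there is no method (e.g. stub compiles) and otherwise forwards to the ciMethod. A minimal sketch of a guarded debug action, using a made-up option name purely for illustration:

    // Sketch: gate extra tracing on a CompilerOracle option.
    // "TraceMyNewPass" is a hypothetical option name.
    if (C->method_has_option("TraceMyNewPass")) {
      tty->print_cr("extra tracing for %s", C->method()->name()->as_utf8());
    }
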


 842   int   range_check_cast_count()       const { return _range_check_casts->length(); }
 843   // Remove all range check dependent CastIINodes.
 844   void  remove_range_check_casts(PhaseIterGVN &igvn);
 845 
 846   void add_opaque4_node(Node* n);
 847   void remove_opaque4_node(Node* n) {
 848     if (_opaque4_nodes->contains(n)) {
 849       _opaque4_nodes->remove(n);
 850     }
 851   }
 852   Node* opaque4_node(int idx) const { return _opaque4_nodes->at(idx);  }
 853   int   opaque4_count()       const { return _opaque4_nodes->length(); }
 854   void  remove_opaque4_nodes(PhaseIterGVN &igvn);
 855 
 856   // Keep track of value type nodes for later processing
 857   void add_value_type(Node* n);
 858   void remove_value_type(Node* n);
 859   void process_value_types(PhaseIterGVN &igvn);
 860   bool can_add_value_type() const { return _value_type_nodes != NULL; }
 861 
 862   void adjust_flattened_array_access_aliases(PhaseIterGVN& igvn);
 863
 864   // remove the opaque nodes that protect the predicates so that the unused checks and
 865   // uncommon traps will be eliminated from the graph.
 866   void cleanup_loop_predicates(PhaseIterGVN &igvn);
 867   bool is_predicate_opaq(Node * n) {
 868     return _predicate_opaqs->contains(n);
 869   }
 870 
 871   // Are there candidate expensive nodes for optimization?
 872   bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
 873   // Check whether n1 and n2 are similar
 874   static int cmp_expensive_nodes(Node* n1, Node* n2);
 875   // Sort expensive nodes to locate similar expensive nodes
 876   void sort_expensive_nodes();
 877 
 878   // Compilation environment.
 879   Arena*      comp_arena()           { return &_comp_arena; }
 880   ciEnv*      env() const            { return _env; }
 881   CompileLog* log() const            { return _log; }
 882   bool        failing() const        { return _env->failing() || _failure_reason != NULL; }
 883   const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }
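
Editor's note: add_value_type()/process_value_types() above follow the same record-then-process pattern as the macro and Opaque4 lists: nodes are registered as the graph is built and consumed in one pass once IGVN is available. can_add_value_type() presumably lets callers skip registration once the list is no longer live. A sketch of the guarded registration, where vt stands for a newly created value type node:

    // Sketch of guarded registration of a value type node.
    if (C->can_add_value_type()) {   // list still live for this compilation
      C->add_value_type(vt);         // consumed later by process_value_types(igvn)
    }
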


 987   void*             type_hwm()                  { return _type_hwm; }
 988   size_t            type_last_size()            { return _type_last_size; }
 989   int               num_alias_types()           { return _num_alias_types; }
 990 
 991   void          init_type_arena()                       { _type_arena = &_Compile_types; }
 992   void          set_type_arena(Arena* a)                { _type_arena = a; }
 993   void          set_type_dict(Dict* d)                  { _type_dict = d; }
 994   void          set_type_hwm(void* p)                   { _type_hwm = p; }
 995   void          set_type_last_size(size_t sz)           { _type_last_size = sz; }
 996 
 997   const TypeFunc* last_tf(ciMethod* m) {
 998     return (m == _last_tf_m) ? _last_tf : NULL;
 999   }
1000   void set_last_tf(ciMethod* m, const TypeFunc* tf) {
1001     assert(m != NULL || tf == NULL, "");
1002     _last_tf_m = m;
1003     _last_tf = tf;
1004   }
1005 
1006   AliasType*        alias_type(int                idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
1007   AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL, bool uncached = false) { return find_alias_type(adr_type, false, field, uncached); }
1008   bool         have_alias_type(const TypePtr* adr_type);
1009   AliasType*        alias_type(ciField*         field);
1010 
1011   int               get_alias_index(const TypePtr* at, bool uncached = false) { return alias_type(at, NULL, uncached)->index(); }
1012   const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
1013   int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }
1014 
1015   // Building nodes
1016   void              rethrow_exceptions(JVMState* jvms);
1017   void              return_values(JVMState* jvms);
1018   JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);
1019 
1020   // Decide how to build a call.
1021   // The profile factor is a discount to apply to this site's interp. profile.
1022   CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
1023                                    JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
1024                                    bool allow_intrinsics = true, bool delayed_forbidden = false);
1025   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
1026     return should_delay_string_inlining(call_method, jvms) ||
1027            should_delay_boxing_inlining(call_method, jvms);
1028   }
1029   bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
1030   bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
1031 
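
Editor's note: the new uncached parameter threaded through alias_type() and get_alias_index() above (and into find_alias_type() further down) lets a caller obtain an AliasType without consulting the alias cache, which presumably matters once flattened array accesses stop sharing a single slice and need per-field entries. A hypothetical call site, purely for illustration:

    // Hypothetical use of the new 'uncached' flag; adr_type is some
    // flattened-field address type computed by the caller.
    int alias_idx = C->get_alias_index(adr_type, /*uncached=*/ true);
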


1308   uint varargs_C_out_slots_killed() const;
1309 
1310   // Number of Stack Slots consumed by a synchronization entry
1311   int sync_stack_slots() const;
1312 
1313   // Compute the name of old_SP.  See <arch>.ad for frame layout.
1314   OptoReg::Name compute_old_SP();
1315 
1316  private:
1317   // Phase control:
1318   void Init(int aliaslevel);                     // Prepare for a single compilation
1319   int  Inline_Warm();                            // Find more inlining work.
1320   void Finish_Warm();                            // Give up on further inlines.
1321   void Optimize();                               // Given a graph, optimize it
1322   void Code_Gen();                               // Generate code from a graph
1323 
1324   // Management of the AliasType table.
1325   void grow_alias_types();
1326   AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
1327   const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
1328   AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field, bool uncached = false);
1329 
1330   void verify_top(Node*) const PRODUCT_RETURN;
1331 
1332   // Intrinsic setup.
1333   void           register_library_intrinsics();                            // initializer
1334   CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor
1335   int            intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found);  // helper
1336   CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
1337   void           register_intrinsic(CallGenerator* cg);                    // update fn
1338 
1339 #ifndef PRODUCT
1340   static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
1341   static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
1342 #endif
1343   // Function calls made by the public function final_graph_reshaping.
1344   // No need to be made public as they are not called elsewhere.
1345   void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
1346   void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop);
1347   void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
1348   void eliminate_redundant_card_marks(Node* n);
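
Editor's note: the intrinsic helpers above are annotated with their roles (initializer / constructor / query fn / update fn) and together maintain a lazily filled, sorted per-Compile table of CallGenerators. A sketch of the lookup-or-create flow those roles suggest; the real logic, including the sorted insertion via intrinsic_insertion_index(), lives in compile.cpp:

    // Sketch of the lookup-or-create flow implied by the role comments above.
    CallGenerator* cg = find_intrinsic(callee, is_virtual);   // query fn
    if (cg == NULL) {
      cg = make_vm_intrinsic(callee, is_virtual);             // constructor; NULL if not an intrinsic
      if (cg != NULL) {
        register_intrinsic(cg);                               // update fn: insert in sorted position
      }
    }
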

