src/share/vm/opto/compile.hpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/opto

src/share/vm/opto/compile.hpp

Print this page
rev 9032 : 8137167: JEP 165: Compiler Control: Implementation task
Summary: Compiler Control JEP
Reviewed-by: roland, twisti


 374   bool                  _do_count_invocations;  // True if we generate code to count invocations
 375   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
 376   bool                  _do_vector_loop;        // True if allowed to execute loop in parallel iterations
 377   bool                  _age_code;              // True if we need to profile code age (decrement the aging counter)
 378   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
 379   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
 380   bool                  _print_inlining;        // True if we should print inlining for this compilation
 381   bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
 382 #ifndef PRODUCT
 383   bool                  _trace_opto_output;     // NOTE(review): presumably mirrors the TraceOptoOutput flag -- confirm
 384   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 385 #endif
 386   bool                  _has_irreducible_loop;  // Found irreducible loops
 387   // JSR 292
 388   bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
 389   RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
 390 
 391   // Compilation environment.
 392   Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
 393   ciEnv*                _env;                   // CI interface

 394   CompileLog*           _log;                   // from CompilerThread
 395   const char*           _failure_reason;        // for record_failure/failing pattern
 396   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
 397   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
 398   GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
 399   GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
 400   ConnectionGraph*      _congraph;              // NOTE(review): escape-analysis connection graph, judging by the type -- confirm
 401 #ifndef PRODUCT
 402   IdealGraphPrinter*    _printer;               // Ideal graph printer (debug builds only)
 403 #endif
 404 
 405 
 406   // Node management
 407   uint                  _unique;                // Counter for unique Node indices
 408   VectorSet             _dead_node_list;        // Set of dead nodes
 409   uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
 410                                                 // So use this to keep count and make the call O(1).
 411   DEBUG_ONLY( Unique_Node_List* _modified_nodes; )  // List of nodes which inputs were modified
 412 
 413   debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>


 510 
 511  public:
 512 
       // Stream that accumulates inlining messages; only meaningful while
       // inlining or intrinsics printing is enabled (enforced by the assert).
 513   outputStream* print_inlining_stream() const {
 514     assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
 515     return _print_inlining_stream;
 516   }
 517 
 518   void print_inlining_update(CallGenerator* cg);
 519   void print_inlining_update_delayed(CallGenerator* cg);
 520   void print_inlining_move_to(CallGenerator* cg);
 521   void print_inlining_assert_ready();
 522   void print_inlining_reset();
 523 
       // Format a single inlining report line for (method, inline_level, bci)
       // via CompileTask::print_inlining_inner and append it to the inlining stream.
 524   void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
 525     stringStream ss;
 526     CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
 527     print_inlining_stream()->print("%s", ss.as_string());
 528   }
 529 




 530   void log_late_inline(CallGenerator* cg);
 531   void log_inline_id(CallGenerator* cg);
 532   void log_inline_failure(const char* msg);
 533 
 534   void* replay_inline_data() const { return _replay_inline_data; }
 535 
 536   // Dump inlining replay data to the stream.
 537   void dump_inline_data(outputStream* out);
 538 
 539  private:
 540   // Matching, CFG layout, allocation, code generation
 541   PhaseCFG*             _cfg;                   // Results of CFG finding
 542   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
 543   bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
 544   int                   _java_calls;            // Number of java calls in the method
 545   int                   _inner_loops;           // Number of inner loops in the method
 546   Matcher*              _matcher;               // Engine to map ideal to machine instructions
 547   PhaseRegAlloc*        _regalloc;              // Results of register allocation.
 548   int                   _frame_slots;           // Size of total frame in stack slots
 549   CodeOffsets           _code_offsets;          // Offsets into the code for various interesting entries


 561   int                   _first_block_size;      // Size of unvalidated entry point code / OSR poison code
 562   ExceptionHandlerTable _handler_table;         // Table of native-code exception handlers
 563   ImplicitExceptionTable _inc_table;            // Table of implicit null checks in native code
 564   OopMapSet*            _oop_map_set;           // Table of oop maps (one for each safepoint location)
 565   static int            _CompiledZap_count;     // counter compared against CompileZap[First/Last]
 566   BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
 567   relocInfo*            _scratch_locs_memory;   // For temporary code buffers.
 568   int                   _scratch_const_size;    // For temporary code buffers.
 569   bool                  _in_scratch_emit_size;  // true when in scratch_emit_size.
 570 
 571  public:
 572   // Accessors
 573 
 574   // The Compile instance currently active in this (compiler) thread.
 575   static Compile* current() {
 576     return (Compile*) ciEnv::current()->compiler_data();
 577   }
 578 
 579   // ID for this compilation.  Useful for setting breakpoints in the debugger.
 580   int               compile_id() const          { return _compile_id; }

 581 
 582   // Does this compilation allow instructions to subsume loads?  User
 583   // instructions that subsume a load may result in an unschedulable
 584   // instruction sequence.
 585   bool              subsume_loads() const       { return _subsume_loads; }
 586   /** Do escape analysis. */
 587   bool              do_escape_analysis() const  { return _do_escape_analysis; }
 588   /** Do boxing elimination. */
 589   bool              eliminate_boxing() const    { return _eliminate_boxing; }
 590   /** Do aggressive boxing elimination. */
 591   bool              aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
 592   bool              save_argument_registers() const { return _save_argument_registers; }
 593 
 594 
 595   // Other fixed compilation parameters.
 596   ciMethod*         method() const              { return _method; }
 597   int               entry_bci() const           { return _entry_bci; }
 598   bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }  // OSR compiles target a bci other than the normal entry
 599   bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
 600   const TypeFunc*   tf() const                  { assert(_tf!=NULL, ""); return _tf; }


 654   bool              age_code() const             { return _age_code; }
 655   void          set_age_code(bool z)             { _age_code = z; }
 656   int               AliasLevel() const           { return _AliasLevel; }
 657   bool              print_assembly() const       { return _print_assembly; }
 658   void          set_print_assembly(bool z)       { _print_assembly = z; }
 659   bool              print_inlining() const       { return _print_inlining; }
 660   void          set_print_inlining(bool z)       { _print_inlining = z; }
 661   bool              print_intrinsics() const     { return _print_intrinsics; }
 662   void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
 663   RTMState          rtm_state()  const           { return _rtm_state; }
 664   void          set_rtm_state(RTMState s)        { _rtm_state = s; }
 665   bool              use_rtm() const              { return (_rtm_state & NoRTM) == 0; }  // RTM is in use whenever the NoRTM bit is clear
 666   bool          profile_rtm() const              { return _rtm_state == ProfileRTM; }
 667   uint              max_node_limit() const       { return (uint)_max_node_limit; }
 668   void          set_max_node_limit(uint n)       { _max_node_limit = n; }
 669 
 670   // check the CompilerOracle for special behaviours for this compile
 671   bool          method_has_option(const char * option) {
 672     return method() != NULL && method()->has_option(option);
 673   }
 674   template<typename T>
 675   bool          method_has_option_value(const char * option, T& value) {
 676     return method() != NULL && method()->has_option_value(option, value);
 677   }
 678 #ifndef PRODUCT
 679   bool          trace_opto_output() const       { return _trace_opto_output; }
 680   bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
 681   void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
 682   int _in_dump_cnt;  // Required for dumping ir nodes.
 683 #endif
 684   bool              has_irreducible_loop() const { return _has_irreducible_loop; }
 685   void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
 686 
 687   // JSR 292
 688   bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
 689   void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
 690 
       // Timestamp of the start of the current compile phase; stamped by
       // begin_method()/print_method()/end_method() and reported in JFR phase events.
 691   Ticks _latest_stage_start_counter;
 692 
 693   void begin_method() {
         // Notify the ideal-graph printer (debug builds only) that compilation of
         // a new method is starting, then stamp the phase-timing counter.
 694 #ifndef PRODUCT
 695     if (_printer && _printer->should_print(_method)) {
 696       _printer->begin_method(this);
 697     }
 698 #endif
 699     C->_latest_stage_start_counter.stamp();
 700   }
 701 
 702   void print_method(CompilerPhaseType cpt, int level = 1) {
         // Emit a JFR CompilerPhase event spanning from the last stamped counter
         // to now, identifying the phase, compile id, and nesting level.
 703     EventCompilerPhase event;
 704     if (event.should_commit()) {
 705       event.set_starttime(C->_latest_stage_start_counter);
 706       event.set_phase((u1) cpt);
 707       event.set_compileID(C->_compile_id);
 708       event.set_phaseLevel(level);
 709       event.commit();
 710     }
 711 
 712 
 713 #ifndef PRODUCT
         // Debug builds: also dump the current ideal graph under the phase's name.
 714     if (_printer && _printer->should_print(_method)) {
 715       _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level);
 716     }
 717 #endif
         // Restart timing for the next phase.
 718     C->_latest_stage_start_counter.stamp();
 719   }
 720 
 721   void end_method(int level = 1) {
         // Emit the terminating PHASE_END JFR event for this compilation and,
         // in debug builds, close out the ideal-graph printer for the method.
 722     EventCompilerPhase event;
 723     if (event.should_commit()) {
 724       event.set_starttime(C->_latest_stage_start_counter);
 725       event.set_phase((u1) PHASE_END);
 726       event.set_compileID(C->_compile_id);
 727       event.set_phaseLevel(level);
 728       event.commit();
 729     }
 730 #ifndef PRODUCT
 731     if (_printer && _printer->should_print(_method)) {
 732       _printer->end_method();
 733     }
 734 #endif
 735   }
 736 
 737   int           macro_count()             const { return _macro_nodes->length(); }
 738   int           predicate_count()         const { return _predicate_opaqs->length();}
 739   int           expensive_count()         const { return _expensive_nodes->length(); }
 740   Node*         macro_node(int idx)       const { return _macro_nodes->at(idx); }
 741   Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);}
 742   Node*         expensive_node(int idx)   const { return _expensive_nodes->at(idx); }
 743   ConnectionGraph* congraph()                   { return _congraph;}
 744   void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
       // Register a node for macro expansion before matching; duplicates are
       // rejected by the assert below.
 745   void add_macro_node(Node * n) {
 746     //assert(n->is_macro(), "must be a macro node");
 747     assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
 748     _macro_nodes->append(n);
 749   }
 750   void remove_macro_node(Node * n) {
 751     // this function may be called twice for a node so check


1090   void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }
1091 
1092   // emit to scratch blob, report resulting size
1093   uint              scratch_emit_size(const Node* n);
1094   void       set_in_scratch_emit_size(bool x)   {        _in_scratch_emit_size = x; }
1095   bool           in_scratch_emit_size() const   { return _in_scratch_emit_size;     }
1096 
       // Fixed capacities for the scratch (size-measuring) code buffer.
1097   enum ScratchBufferBlob {
1098     MAX_inst_size       = 1024,
1099     MAX_locs_size       = 128, // number of relocInfo elements
1100     MAX_const_size      = 128,
1101     MAX_stubs_size      = 128
1102   };
1103 
1104   // Major entry point.  Given a Scope, compile the associated method.
1105   // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
1106   // replacement, entry_bci indicates the bytecode for which to compile a
1107   // continuation.
1108   Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
1109           int entry_bci, bool subsume_loads, bool do_escape_analysis,
1110           bool eliminate_boxing);
1111 
1112   // Second major entry point.  From the TypeFunc signature, generate code
1113   // to pass arguments from the Java calling convention to the C calling
1114   // convention.
1115   Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
1116           address stub_function, const char *stub_name,
1117           int is_fancy_jump, bool pass_tls,
1118           bool save_arg_registers, bool return_pc);
1119 
1120   // From the TypeFunc signature, generate code to pass arguments
1121   // from Compiled calling convention to Interpreter's calling convention
1122   void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);
1123 
1124   // From the TypeFunc signature, generate code to pass arguments
1125   // from Interpreter's calling convention to Compiler's calling convention
1126   void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);
1127 
1128   // Are we compiling a method?
1129   bool has_method() { return method() != NULL; }
1130 
1131   // Maybe print some information about this compile.
1132   void print_compile_messages();
1133 
1134   // Final graph reshaping, a post-pass after the regular optimizer is done.
1135   bool final_graph_reshaping();
1136 
1137   // returns true if adr is completely contained in the given alias category
1138   bool must_alias(const TypePtr* adr, int alias_idx);




 374   bool                  _do_count_invocations;  // True if we generate code to count invocations
 375   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
 376   bool                  _do_vector_loop;        // True if allowed to execute loop in parallel iterations
 377   bool                  _age_code;              // True if we need to profile code age (decrement the aging counter)
 378   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
 379   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
 380   bool                  _print_inlining;        // True if we should print inlining for this compilation
 381   bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
 382 #ifndef PRODUCT
 383   bool                  _trace_opto_output;     // NOTE(review): presumably mirrors the TraceOptoOutput flag -- confirm
 384   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 385 #endif
 386   bool                  _has_irreducible_loop;  // Found irreducible loops
 387   // JSR 292
 388   bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
 389   RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
 390 
 391   // Compilation environment.
 392   Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
 393   ciEnv*                _env;                   // CI interface
 394   DirectiveSet*         _directive;             // Compiler directive (new in JEP 165: per-compilation option set)
 395   CompileLog*           _log;                   // from CompilerThread
 396   const char*           _failure_reason;        // for record_failure/failing pattern
 397   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
 398   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
 399   GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
 400   GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
 401   ConnectionGraph*      _congraph;              // NOTE(review): escape-analysis connection graph, judging by the type -- confirm
 402 #ifndef PRODUCT
 403   IdealGraphPrinter*    _printer;               // Ideal graph printer (debug builds only)
 404 #endif
 405 
 406 
 407   // Node management
 408   uint                  _unique;                // Counter for unique Node indices
 409   VectorSet             _dead_node_list;        // Set of dead nodes
 410   uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
 411                                                 // So use this to keep count and make the call O(1).
 412   DEBUG_ONLY( Unique_Node_List* _modified_nodes; )  // List of nodes which inputs were modified
 413 
 414   debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>


 511 
 512  public:
 513 
       // Stream that accumulates inlining messages; only meaningful while
       // inlining or intrinsics printing is enabled (enforced by the assert).
 514   outputStream* print_inlining_stream() const {
 515     assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
 516     return _print_inlining_stream;
 517   }
 518 
 519   void print_inlining_update(CallGenerator* cg);
 520   void print_inlining_update_delayed(CallGenerator* cg);
 521   void print_inlining_move_to(CallGenerator* cg);
 522   void print_inlining_assert_ready();
 523   void print_inlining_reset();
 524 
       // Format a single inlining report line for (method, inline_level, bci)
       // via CompileTask::print_inlining_inner and append it to the inlining stream.
 525   void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
 526     stringStream ss;
 527     CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
 528     print_inlining_stream()->print("%s", ss.as_string());
 529   }
 530 
 531 #ifndef PRODUCT
       // Accessor for the ideal-graph printer (debug builds only); new in this revision.
 532   IdealGraphPrinter* printer() { return _printer; }
 533 #endif
 534 
 535   void log_late_inline(CallGenerator* cg);
 536   void log_inline_id(CallGenerator* cg);
 537   void log_inline_failure(const char* msg);
 538 
 539   void* replay_inline_data() const { return _replay_inline_data; }
 540 
 541   // Dump inlining replay data to the stream.
 542   void dump_inline_data(outputStream* out);
 543 
 544  private:
 545   // Matching, CFG layout, allocation, code generation
 546   PhaseCFG*             _cfg;                   // Results of CFG finding
 547   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
 548   bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
 549   int                   _java_calls;            // Number of java calls in the method
 550   int                   _inner_loops;           // Number of inner loops in the method
 551   Matcher*              _matcher;               // Engine to map ideal to machine instructions
 552   PhaseRegAlloc*        _regalloc;              // Results of register allocation.
 553   int                   _frame_slots;           // Size of total frame in stack slots
 554   CodeOffsets           _code_offsets;          // Offsets into the code for various interesting entries


 566   int                   _first_block_size;      // Size of unvalidated entry point code / OSR poison code
 567   ExceptionHandlerTable _handler_table;         // Table of native-code exception handlers
 568   ImplicitExceptionTable _inc_table;            // Table of implicit null checks in native code
 569   OopMapSet*            _oop_map_set;           // Table of oop maps (one for each safepoint location)
 570   static int            _CompiledZap_count;     // counter compared against CompileZap[First/Last]
 571   BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
 572   relocInfo*            _scratch_locs_memory;   // For temporary code buffers.
 573   int                   _scratch_const_size;    // For temporary code buffers.
 574   bool                  _in_scratch_emit_size;  // true when in scratch_emit_size.
 575 
 576  public:
 577   // Accessors
 578 
 579   // The Compile instance currently active in this (compiler) thread.
 580   static Compile* current() {
 581     return (Compile*) ciEnv::current()->compiler_data();
 582   }
 583 
 584   // ID for this compilation.  Useful for setting breakpoints in the debugger.
 585   int               compile_id() const          { return _compile_id; }
 586   DirectiveSet*     directive() const           { return _directive; }  // Compiler Control directive for this compilation (JEP 165)
 587 
 588   // Does this compilation allow instructions to subsume loads?  User
 589   // instructions that subsume a load may result in an unschedulable
 590   // instruction sequence.
 591   bool              subsume_loads() const       { return _subsume_loads; }
 592   /** Do escape analysis. */
 593   bool              do_escape_analysis() const  { return _do_escape_analysis; }
 594   /** Do boxing elimination. */
 595   bool              eliminate_boxing() const    { return _eliminate_boxing; }
 596   /** Do aggressive boxing elimination. */
 597   bool              aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
 598   bool              save_argument_registers() const { return _save_argument_registers; }
 599 
 600 
 601   // Other fixed compilation parameters.
 602   ciMethod*         method() const              { return _method; }
 603   int               entry_bci() const           { return _entry_bci; }
 604   bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }  // OSR compiles target a bci other than the normal entry
 605   bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
 606   const TypeFunc*   tf() const                  { assert(_tf!=NULL, ""); return _tf; }


 660   bool              age_code() const             { return _age_code; }
 661   void          set_age_code(bool z)             { _age_code = z; }
 662   int               AliasLevel() const           { return _AliasLevel; }
 663   bool              print_assembly() const       { return _print_assembly; }
 664   void          set_print_assembly(bool z)       { _print_assembly = z; }
 665   bool              print_inlining() const       { return _print_inlining; }
 666   void          set_print_inlining(bool z)       { _print_inlining = z; }
 667   bool              print_intrinsics() const     { return _print_intrinsics; }
 668   void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
 669   RTMState          rtm_state()  const           { return _rtm_state; }
 670   void          set_rtm_state(RTMState s)        { _rtm_state = s; }
 671   bool              use_rtm() const              { return (_rtm_state & NoRTM) == 0; }  // RTM is in use whenever the NoRTM bit is clear
 672   bool          profile_rtm() const              { return _rtm_state == ProfileRTM; }
 673   uint              max_node_limit() const       { return (uint)_max_node_limit; }
 674   void          set_max_node_limit(uint n)       { _max_node_limit = n; }
 675 
 676   // check the CompilerOracle for special behaviours for this compile
 677   bool          method_has_option(const char * option) {
 678     return method() != NULL && method()->has_option(option);
 679   }
 680 
       // NOTE(review): the templated method_has_option_value() present in the old
       // revision is gone here -- option values presumably now come from the
       // DirectiveSet (see directive()); confirm against callers.



 681 #ifndef PRODUCT
 682   bool          trace_opto_output() const       { return _trace_opto_output; }
 683   bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
 684   void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
 685   int _in_dump_cnt;  // Required for dumping ir nodes.
 686 #endif
 687   bool              has_irreducible_loop() const { return _has_irreducible_loop; }
 688   void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
 689 
 690   // JSR 292
 691   bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
 692   void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
 693 
       // Timestamp of the start of the current compile phase; stamped by
       // begin_method()/print_method()/end_method() and reported in JFR phase events.
 694   Ticks _latest_stage_start_counter;
 695 
 696   void begin_method() {
         // Notify the ideal-graph printer (debug builds only) that compilation is
         // starting, then stamp the phase-timing counter.  Note this revision calls
         // should_print(1) with a level instead of passing _method, and begin_method()
         // takes no argument -- the printer's interface changed with JEP 165.
 697 #ifndef PRODUCT
 698     if (_printer && _printer->should_print(1)) {
 699       _printer->begin_method();
 700     }
 701 #endif
 702     C->_latest_stage_start_counter.stamp();
 703   }
 704 
 705   void print_method(CompilerPhaseType cpt, int level = 1) {
         // Emit a JFR CompilerPhase event spanning from the last stamped counter
         // to now, identifying the phase, compile id, and nesting level.
 706     EventCompilerPhase event;
 707     if (event.should_commit()) {
 708       event.set_starttime(C->_latest_stage_start_counter);
 709       event.set_phase((u1) cpt);
 710       event.set_compileID(C->_compile_id);
 711       event.set_phaseLevel(level);
 712       event.commit();
 713     }
 714 
 715 
 716 #ifndef PRODUCT
         // Debug builds: dump the current ideal graph.  This revision decides via
         // should_print(level) rather than should_print(_method).
 717     if (_printer && _printer->should_print(level)) {
 718       _printer->print_method(CompilerPhaseTypeHelper::to_string(cpt), level);
 719     }
 720 #endif
         // Restart timing for the next phase.
 721     C->_latest_stage_start_counter.stamp();
 722   }
 723 
 724   void end_method(int level = 1) {
         // Emit the terminating PHASE_END JFR event for this compilation and,
         // in debug builds, close out the ideal-graph printer (level-based
         // should_print in this revision).
 725     EventCompilerPhase event;
 726     if (event.should_commit()) {
 727       event.set_starttime(C->_latest_stage_start_counter);
 728       event.set_phase((u1) PHASE_END);
 729       event.set_compileID(C->_compile_id);
 730       event.set_phaseLevel(level);
 731       event.commit();
 732     }
 733 #ifndef PRODUCT
 734     if (_printer && _printer->should_print(level)) {
 735       _printer->end_method();
 736     }
 737 #endif
 738   }
 739 
 740   int           macro_count()             const { return _macro_nodes->length(); }
 741   int           predicate_count()         const { return _predicate_opaqs->length();}
 742   int           expensive_count()         const { return _expensive_nodes->length(); }
 743   Node*         macro_node(int idx)       const { return _macro_nodes->at(idx); }
 744   Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);}
 745   Node*         expensive_node(int idx)   const { return _expensive_nodes->at(idx); }
 746   ConnectionGraph* congraph()                   { return _congraph;}
 747   void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
       // Register a node for macro expansion before matching; duplicates are
       // rejected by the assert below.
 748   void add_macro_node(Node * n) {
 749     //assert(n->is_macro(), "must be a macro node");
 750     assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
 751     _macro_nodes->append(n);
 752   }
 753   void remove_macro_node(Node * n) {
 754     // this function may be called twice for a node so check


1093   void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }
1094 
1095   // emit to scratch blob, report resulting size
1096   uint              scratch_emit_size(const Node* n);
1097   void       set_in_scratch_emit_size(bool x)   {        _in_scratch_emit_size = x; }
1098   bool           in_scratch_emit_size() const   { return _in_scratch_emit_size;     }
1099 
       // Fixed capacities for the scratch (size-measuring) code buffer.
1100   enum ScratchBufferBlob {
1101     MAX_inst_size       = 1024,
1102     MAX_locs_size       = 128, // number of relocInfo elements
1103     MAX_const_size      = 128,
1104     MAX_stubs_size      = 128
1105   };
1106 
1107   // Major entry point.  Given a Scope, compile the associated method.
1108   // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
1109   // replacement, entry_bci indicates the bytecode for which to compile a
1110   // continuation.
       // This revision additionally threads the JEP 165 DirectiveSet through
       // both constructors.
1111   Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
1112           int entry_bci, bool subsume_loads, bool do_escape_analysis,
1113           bool eliminate_boxing, DirectiveSet* directive);
1114 
1115   // Second major entry point.  From the TypeFunc signature, generate code
1116   // to pass arguments from the Java calling convention to the C calling
1117   // convention.
1118   Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
1119           address stub_function, const char *stub_name,
1120           int is_fancy_jump, bool pass_tls,
1121           bool save_arg_registers, bool return_pc, DirectiveSet* directive);
1122 
1123   // From the TypeFunc signature, generate code to pass arguments
1124   // from Compiled calling convention to Interpreter's calling convention
1125   void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);
1126 
1127   // From the TypeFunc signature, generate code to pass arguments
1128   // from Interpreter's calling convention to Compiler's calling convention
1129   void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);
1130 
1131   // Are we compiling a method?
1132   bool has_method() { return method() != NULL; }
1133 
1134   // Maybe print some information about this compile.
1135   void print_compile_messages();
1136 
1137   // Final graph reshaping, a post-pass after the regular optimizer is done.
1138   bool final_graph_reshaping();
1139 
1140   // returns true if adr is completely contained in the given alias category
1141   bool must_alias(const TypePtr* adr, int alias_idx);


src/share/vm/opto/compile.hpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File