469
470 void* replay_inline_data() const { return _replay_inline_data; }
471
472 // Dump inlining replay data to the stream.
473 void dump_inline_data(outputStream* out);
474
475 private:
476 // Matching, CFG layout, allocation, code generation
477 PhaseCFG* _cfg; // Results of CFG finding
478 bool _select_24_bit_instr; // We selected an instruction with a 24-bit result
479 bool _in_24_bit_fp_mode; // We are emitting instructions with 24-bit results
480 int _java_calls; // Number of java calls in the method
481 int _inner_loops; // Number of inner loops in the method
482 Matcher* _matcher; // Engine to map ideal to machine instructions
483 PhaseRegAlloc* _regalloc; // Results of register allocation.
484 int _frame_slots; // Size of total frame in stack slots
485 CodeOffsets _code_offsets; // Offsets into the code for various interesting entries
486 RegMask _FIRST_STACK_mask; // All stack slots usable for spills (depends on frame layout)
487 Arena* _indexSet_arena; // control IndexSet allocation within PhaseChaitin
488 void* _indexSet_free_block_list; // free list of IndexSet bit blocks
489
490 uint _node_bundling_limit; // Number of entries in _node_bundling_base (presumably one per node) -- confirm at use sites
491 Bundle* _node_bundling_base; // Information for instruction bundling
492
493 // Instruction bits passed off to the VM
494 int _method_size; // Size of nmethod code segment in bytes
495 CodeBuffer _code_buffer; // Where the code is assembled
496 int _first_block_size; // Size of unvalidated entry point code / OSR poison code
497 ExceptionHandlerTable _handler_table; // Table of native-code exception handlers
498 ImplicitExceptionTable _inc_table; // Table of implicit null checks in native code
499 OopMapSet* _oop_map_set; // Table of oop maps (one for each safepoint location)
500 static int _CompiledZap_count; // counter compared against CompileZap[First/Last]
501 BufferBlob* _scratch_buffer_blob; // For temporary code buffers.
502 relocInfo* _scratch_locs_memory; // For temporary code buffers.
503 int _scratch_const_size; // For temporary code buffers.
504 bool _in_scratch_emit_size; // true when in scratch_emit_size.
505
506 public:
507 // Accessors
922 void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
923 void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
924 bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; } // any method-handle late inlines still pending?
925
926 void inline_incrementally_one(PhaseIterGVN& igvn);
927 void inline_incrementally(PhaseIterGVN& igvn);
928 void inline_string_calls(bool parse_time);
929 void inline_boxing_calls(PhaseIterGVN& igvn);
930
931 // Matching, CFG layout, allocation, code generation
932 PhaseCFG* cfg() { return _cfg; }
933 bool select_24_bit_instr() const { return _select_24_bit_instr; }
934 bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
935 bool has_java_calls() const { return _java_calls > 0; }
936 int java_calls() const { return _java_calls; }
937 int inner_loops() const { return _inner_loops; }
938 Matcher* matcher() { return _matcher; }
939 PhaseRegAlloc* regalloc() { return _regalloc; }
940 int frame_slots() const { return _frame_slots; }
941 int frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
942 RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
943 Arena* indexSet_arena() { return _indexSet_arena; }
944 void* indexSet_free_block_list() { return _indexSet_free_block_list; }
945 uint node_bundling_limit() { return _node_bundling_limit; }
946 Bundle* node_bundling_base() { return _node_bundling_base; }
947 void set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
948 void set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
949 bool starts_bundle(const Node *n) const;
950 bool need_stack_bang(int frame_size_in_bytes) const; // defined elsewhere; decides if a stack-overflow bang is required for this frame size
951 bool need_register_stack_bang() const;
952
953 void set_matcher(Matcher* m) { _matcher = m; }
954 //void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
955 void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }
956 void set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }
957
958 // Remember if this compilation changes hardware mode to 24-bit precision
959 void set_24_bit_selection_and_mode(bool selection, bool mode) {
960 _select_24_bit_instr = selection;
961 _in_24_bit_fp_mode = mode;
962 }
963
964 void set_java_calls(int z) { _java_calls = z; }
965 void set_inner_loops(int z) { _inner_loops = z; }
966
967 // Instruction bits passed off to the VM
968 int code_size() { return _method_size; } // size of the nmethod code segment, in bytes
969 CodeBuffer* code_buffer() { return &_code_buffer; }
970 int first_block_size() { return _first_block_size; }
971 void set_frame_complete(int off) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
|
469
470 void* replay_inline_data() const { return _replay_inline_data; }
471
472 // Dump inlining replay data to the stream.
473 void dump_inline_data(outputStream* out);
474
475 private:
476 // Matching, CFG layout, allocation, code generation
477 PhaseCFG* _cfg; // Results of CFG finding
478 bool _select_24_bit_instr; // We selected an instruction with a 24-bit result
479 bool _in_24_bit_fp_mode; // We are emitting instructions with 24-bit results
480 int _java_calls; // Number of java calls in the method
481 int _inner_loops; // Number of inner loops in the method
482 Matcher* _matcher; // Engine to map ideal to machine instructions
483 PhaseRegAlloc* _regalloc; // Results of register allocation.
484 int _frame_slots; // Size of total frame in stack slots
485 CodeOffsets _code_offsets; // Offsets into the code for various interesting entries
486 RegMask _FIRST_STACK_mask; // All stack slots usable for spills (depends on frame layout)
487 Arena* _indexSet_arena; // control IndexSet allocation within PhaseChaitin
488 void* _indexSet_free_block_list; // free list of IndexSet bit blocks
489 int _interpreter_frame_size; // Largest interpreter frame size recorded via update_interpreter_frame_size(); presumably feeds bang_size_in_bytes() -- confirm at definition
490
491 uint _node_bundling_limit; // Number of entries in _node_bundling_base (presumably one per node) -- confirm at use sites
492 Bundle* _node_bundling_base; // Information for instruction bundling
493
494 // Instruction bits passed off to the VM
495 int _method_size; // Size of nmethod code segment in bytes
496 CodeBuffer _code_buffer; // Where the code is assembled
497 int _first_block_size; // Size of unvalidated entry point code / OSR poison code
498 ExceptionHandlerTable _handler_table; // Table of native-code exception handlers
499 ImplicitExceptionTable _inc_table; // Table of implicit null checks in native code
500 OopMapSet* _oop_map_set; // Table of oop maps (one for each safepoint location)
501 static int _CompiledZap_count; // counter compared against CompileZap[First/Last]
502 BufferBlob* _scratch_buffer_blob; // For temporary code buffers.
503 relocInfo* _scratch_locs_memory; // For temporary code buffers.
504 int _scratch_const_size; // For temporary code buffers.
505 bool _in_scratch_emit_size; // true when in scratch_emit_size.
506
507 public:
508 // Accessors
923 void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
924 void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
925 bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; } // any method-handle late inlines still pending?
926
927 void inline_incrementally_one(PhaseIterGVN& igvn);
928 void inline_incrementally(PhaseIterGVN& igvn);
929 void inline_string_calls(bool parse_time);
930 void inline_boxing_calls(PhaseIterGVN& igvn);
931
932 // Matching, CFG layout, allocation, code generation
933 PhaseCFG* cfg() { return _cfg; }
934 bool select_24_bit_instr() const { return _select_24_bit_instr; }
935 bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
936 bool has_java_calls() const { return _java_calls > 0; }
937 int java_calls() const { return _java_calls; }
938 int inner_loops() const { return _inner_loops; }
939 Matcher* matcher() { return _matcher; }
940 PhaseRegAlloc* regalloc() { return _regalloc; }
941 int frame_slots() const { return _frame_slots; }
942 int frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
943 int frame_size_in_bytes() const { return _frame_slots << LogBytesPerInt; } // stack slots are int-sized, hence the LogBytesPerInt shift
944 RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
945 Arena* indexSet_arena() { return _indexSet_arena; }
946 void* indexSet_free_block_list() { return _indexSet_free_block_list; }
947 uint node_bundling_limit() { return _node_bundling_limit; }
948 Bundle* node_bundling_base() { return _node_bundling_base; }
949 void set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
950 void set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
951 bool starts_bundle(const Node *n) const;
952 bool need_stack_bang(int frame_size_in_bytes) const; // defined elsewhere; decides if a stack-overflow bang is required for this frame size
953 bool need_register_stack_bang() const;
954
955 void update_interpreter_frame_size(int size) { // keep the running maximum of interpreter frame sizes seen
956 if (_interpreter_frame_size < size) {
957 _interpreter_frame_size = size;
958 }
959 }
960 int bang_size_in_bytes() const; // stack-bang size; presumably accounts for _interpreter_frame_size (deopt) -- confirm at definition
961
962 void set_matcher(Matcher* m) { _matcher = m; }
963 //void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
964 void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }
965 void set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }
966
967 // Remember if this compilation changes hardware mode to 24-bit precision
968 void set_24_bit_selection_and_mode(bool selection, bool mode) {
969 _select_24_bit_instr = selection;
970 _in_24_bit_fp_mode = mode;
971 }
972
973 void set_java_calls(int z) { _java_calls = z; }
974 void set_inner_loops(int z) { _inner_loops = z; }
975
976 // Instruction bits passed off to the VM
977 int code_size() { return _method_size; } // size of the nmethod code segment, in bytes
978 CodeBuffer* code_buffer() { return &_code_buffer; }
979 int first_block_size() { return _first_block_size; }
980 void set_frame_complete(int off) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
|