245 Constant add(MachConstantNode* n, jfloat f) { // Add a jfloat constant for node n to the constant table.
246 jvalue value; value.f = f; // Wrap the float in a jvalue union.
247 return add(n, T_FLOAT, value); // Delegate to the BasicType-tagged add() overload.
248 }
249 Constant add(MachConstantNode* n, jdouble d) { // Add a jdouble constant for node n to the constant table.
250 jvalue value; value.d = d; // Wrap the double in a jvalue union.
251 return add(n, T_DOUBLE, value); // Delegate to the BasicType-tagged add() overload.
252 }
253
254 // Jump-table
255 Constant add_jump_table(MachConstantNode* n);
256 void fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
257 };
258
259 private:
260 // Fixed parameters to this compilation.
261 const int _compile_id;
262 const bool _save_argument_registers; // save/restore arg regs for trampolines
263 const bool _subsume_loads; // Load can be matched as part of a larger op.
264 const bool _do_escape_analysis; // Do escape analysis.
265 ciMethod* _method; // The method being compiled.
266 int _entry_bci; // entry bci for osr methods.
267 const TypeFunc* _tf; // My kind of signature
268 InlineTree* _ilt; // Ditto (temporary).
269 address _stub_function; // VM entry for stub being compiled, or NULL
270 const char* _stub_name; // Name of stub or adapter being compiled, or NULL
271 address _stub_entry_point; // Compile code entry for generated stub, or NULL
272
273 // Control of this compilation.
274 int _num_loop_opts; // Number of iterations for doing loop optimizations
275 int _max_inline_size; // Max inline size for this compilation
276 int _freq_inline_size; // Max hot method inline size for this compilation
277 int _fixed_slots; // count of frame slots not allocated by the register
278 // allocator i.e. locks, original deopt pc, etc.
279 // For deopt
280 int _orig_pc_slot;
281 int _orig_pc_slot_offset_in_bytes;
282
283 int _major_progress; // Count of something big happening
284 bool _inlining_progress; // progress doing incremental inlining?
285 bool _inlining_incrementally;// Are we doing incremental inlining (post parse)
286 bool _has_loops; // True if the method _may_ have some loops
287 bool _has_split_ifs; // True if the method _may_ have some split-if
288 bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores.
289 bool _has_stringbuilder; // True if StringBuffers or StringBuilders are allocated
290 int _max_vector_size; // Maximum size of generated vectors
291 uint _trap_hist[trapHistLength]; // Cumulative traps
292 bool _trap_can_recompile; // Have we emitted a recompiling trap?
293 uint _decompile_count; // Cumulative decompilation counts.
294 bool _do_inlining; // True if we intend to do inlining
295 bool _do_scheduling; // True if we intend to do scheduling
296 bool _do_freq_based_layout; // True if we intend to do frequency based block layout
297 bool _do_count_invocations; // True if we generate code to count invocations
298 bool _do_method_data_update; // True if we generate code to update MethodData*s
299 int _AliasLevel; // Locally-adjusted version of AliasLevel flag.
300 bool _print_assembly; // True if we should dump assembly code for this compilation
301 #ifndef PRODUCT
302 bool _trace_opto_output;
303 bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
304 #endif
305
306 // JSR 292
307 bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
308
309 // Compilation environment.
358 Arena* _type_arena; // Alias for _Compile_types except in Initialize_shared()
359 Dict* _type_dict; // Intern table
360 void* _type_hwm; // Last allocation (see Type::operator new/delete)
361 size_t _type_last_size; // Last allocation size (see Type::operator new/delete)
362 ciMethod* _last_tf_m; // Cache for
363 const TypeFunc* _last_tf; // TypeFunc::make
364 AliasType** _alias_types; // List of alias types seen so far.
365 int _num_alias_types; // Logical length of _alias_types
366 int _max_alias_types; // Physical length of _alias_types
367 AliasCacheEntry _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking
368
369 // Parsing, optimization
370 PhaseGVN* _initial_gvn; // Results of parse-time PhaseGVN
371 Unique_Node_List* _for_igvn; // Initial work-list for next round of Iterative GVN
372 WarmCallInfo* _warm_calls; // Sorted work-list for heat-based inlining.
373
374 GrowableArray<CallGenerator*> _late_inlines; // List of CallGenerators to be revisited after
375 // main parsing has finished.
376 GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
377
378 int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
379 uint _number_of_mh_late_inlines; // number of method handle late inlining still pending
380
381
382 // Inlining may not happen in parse order which would make
383 // PrintInlining output confusing. Keep track of PrintInlining
384 // pieces in order.
385 class PrintInliningBuffer : public ResourceObj { // One buffered piece of PrintInlining output, kept so pieces can be emitted in parse order.
386 private:
387 CallGenerator* _cg; // Call generator this output piece belongs to (NULL until set_cg).
388 stringStream* _ss; // Stream accumulating the PrintInlining text for this piece.
389 
390 public:
391 PrintInliningBuffer()
392 : _cg(NULL) { _ss = new stringStream(); } // Starts with an empty stream and no call generator attached.
393 
394 stringStream* ss() const { return _ss; } // Accessor: the buffered text stream.
395 CallGenerator* cg() const { return _cg; } // Accessor: the associated call generator (may be NULL).
396 void set_cg(CallGenerator* cg) { _cg = cg; } // Attach the call generator for this piece.
397 };
471 int _scratch_const_size; // For temporary code buffers.
472 bool _in_scratch_emit_size; // true when in scratch_emit_size.
473
474 public:
475 // Accessors
476
477 // The Compile instance currently active in this (compiler) thread.
478 static Compile* current() { // The Compile instance active in this compiler thread, fetched from the thread's ciEnv.
479 return (Compile*) ciEnv::current()->compiler_data(); // compiler_data() holds the active compilation; cast back to Compile*.
480 }
481
482 // ID for this compilation. Useful for setting breakpoints in the debugger.
483 int compile_id() const { return _compile_id; }
484
485 // Does this compilation allow instructions to subsume loads? User
486 // instructions that subsume a load may result in an unschedulable
487 // instruction sequence.
488 bool subsume_loads() const { return _subsume_loads; }
489 // Do escape analysis.
490 bool do_escape_analysis() const { return _do_escape_analysis; }
491 bool save_argument_registers() const { return _save_argument_registers; }
492
493
494 // Other fixed compilation parameters.
495 ciMethod* method() const { return _method; }
496 int entry_bci() const { return _entry_bci; }
497 bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
498 bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
499 const TypeFunc* tf() const { assert(_tf!=NULL, ""); return _tf; }
500 void init_tf(const TypeFunc* tf) { assert(_tf==NULL, ""); _tf = tf; }
501 InlineTree* ilt() const { return _ilt; }
502 address stub_function() const { return _stub_function; }
503 const char* stub_name() const { return _stub_name; }
504 address stub_entry_point() const { return _stub_entry_point; }
505
506 // Control of this compilation.
507 int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
508 void set_fixed_slots(int n) { _fixed_slots = n; }
509 int major_progress() const { return _major_progress; }
510 void set_inlining_progress(bool z) { _inlining_progress = z; }
511 int inlining_progress() const { return _inlining_progress; }
512 void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
513 int inlining_incrementally() const { return _inlining_incrementally; }
514 void set_major_progress() { _major_progress++; }
515 void clear_major_progress() { _major_progress = 0; }
516 int num_loop_opts() const { return _num_loop_opts; }
517 void set_num_loop_opts(int n) { _num_loop_opts = n; }
518 int max_inline_size() const { return _max_inline_size; }
519 void set_freq_inline_size(int n) { _freq_inline_size = n; }
520 int freq_inline_size() const { return _freq_inline_size; }
521 void set_max_inline_size(int n) { _max_inline_size = n; }
522 bool has_loops() const { return _has_loops; }
523 void set_has_loops(bool z) { _has_loops = z; }
524 bool has_split_ifs() const { return _has_split_ifs; }
525 void set_has_split_ifs(bool z) { _has_split_ifs = z; }
526 bool has_unsafe_access() const { return _has_unsafe_access; }
527 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
528 bool has_stringbuilder() const { return _has_stringbuilder; }
529 void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
530 int max_vector_size() const { return _max_vector_size; }
531 void set_max_vector_size(int s) { _max_vector_size = s; }
532 void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
533 uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
534 bool trap_can_recompile() const { return _trap_can_recompile; }
535 void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
536 uint decompile_count() const { return _decompile_count; }
537 void set_decompile_count(uint c) { _decompile_count = c; }
538 bool allow_range_check_smearing() const;
539 bool do_inlining() const { return _do_inlining; }
540 void set_do_inlining(bool z) { _do_inlining = z; }
541 bool do_scheduling() const { return _do_scheduling; }
542 void set_do_scheduling(bool z) { _do_scheduling = z; }
543 bool do_freq_based_layout() const{ return _do_freq_based_layout; }
544 void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
545 bool do_count_invocations() const{ return _do_count_invocations; }
546 void set_do_count_invocations(bool z){ _do_count_invocations = z; }
547 bool do_method_data_update() const { return _do_method_data_update; }
548 void set_do_method_data_update(bool z) { _do_method_data_update = z; }
549 int AliasLevel() const { return _AliasLevel; }
562 // JSR 292
563 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
564 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
565
566 void begin_method() { // Notify the attached printer (if any) that a method compilation is starting; no-op in PRODUCT builds.
567 #ifndef PRODUCT
568 if (_printer) _printer->begin_method(this);
569 #endif
570 }
571 void print_method(const char * name, int level = 1) { // Emit a named snapshot of the current graph via the printer; no-op in PRODUCT builds.
572 #ifndef PRODUCT
573 if (_printer) _printer->print_method(this, name, level); // 'level' controls printer verbosity/filtering.
574 #endif
575 }
576 void end_method() { // Notify the attached printer (if any) that this method compilation is done; no-op in PRODUCT builds.
577 #ifndef PRODUCT
578 if (_printer) _printer->end_method();
579 #endif
580 }
581
582 int macro_count() { return _macro_nodes->length(); }
583 int predicate_count() { return _predicate_opaqs->length();}
584 int expensive_count() { return _expensive_nodes->length(); }
585 Node* macro_node(int idx) { return _macro_nodes->at(idx); }
586 Node* predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
587 Node* expensive_node(int idx) { return _expensive_nodes->at(idx); }
588 ConnectionGraph* congraph() { return _congraph;}
589 void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;}
590 void add_macro_node(Node * n) { // Register n on the macro-expansion worklist; each node may appear at most once.
591 //assert(n->is_macro(), "must be a macro node");
592 assert(!_macro_nodes->contains(n), " duplicate entry in expand list"); // Duplicates would cause double expansion.
593 _macro_nodes->append(n);
594 }
595 void remove_macro_node(Node * n) { // Drop n from the macro worklist and, if present, from the predicate-opaque list too.
596 // this function may be called twice for a node so check
597 // that the node is in the array before attempting to remove it
598 if (_macro_nodes->contains(n))
599 _macro_nodes->remove(n);
600 // remove from _predicate_opaqs list also if it is there
601 if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
602 _predicate_opaqs->remove(n);
603 }
604 }
605 void add_expensive_node(Node * n);
606 void remove_expensive_node(Node * n) {
607 if (_expensive_nodes->contains(n)) {
749 _last_tf = tf;
750 }
751
752 AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
753 AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
754 bool have_alias_type(const TypePtr* adr_type);
755 AliasType* alias_type(ciField* field);
756
757 int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
758 const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
759 int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }
760
761 // Building nodes
762 void rethrow_exceptions(JVMState* jvms);
763 void return_values(JVMState* jvms);
764 JVMState* build_start_state(StartNode* start, const TypeFunc* tf);
765
766 // Decide how to build a call.
767 // The profile factor is a discount to apply to this site's interp. profile.
768 CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
769 bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);
770
771 // Helper functions to identify inlining potential at call-site
772 ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
773 ciMethod* callee, const TypeOopPtr* receiver_type,
774 bool is_virtual,
775 bool &call_does_dispatch, int &vtable_index);
776 ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
777 ciMethod* callee, const TypeOopPtr* receiver_type);
778
779 // Report if there were too many traps at a current method and bci.
780 // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
781 // If there is no MDO at all, report no trap unless told to assume it.
782 bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
783 // This version, unspecific to a particular bci, asks if
784 // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
785 bool too_many_traps(Deoptimization::DeoptReason reason,
786 // Privately used parameter for logging:
787 ciMethodData* logmd = NULL);
788 // Report if there were too many recompiles at a method and bci.
789 bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
805 void remove_useless_nodes (Unique_Node_List &useful);
806
807 WarmCallInfo* warm_calls() const { return _warm_calls; }
808 void set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
809 WarmCallInfo* pop_warm_call();
810
811 // Record this CallGenerator for inlining at the end of parsing.
812 void add_late_inline(CallGenerator* cg) { // Queue cg for late inlining at the current insertion point (emulates depth-first inline order).
813 _late_inlines.insert_before(_late_inlines_pos, cg);
814 _late_inlines_pos++; // Advance so subsequent candidates land after this one.
815 }
816
817 void prepend_late_inline(CallGenerator* cg) { // Put cg at the front of the late-inline queue so it is revisited first.
818 _late_inlines.insert_before(0, cg);
819 }
820
821 void add_string_late_inline(CallGenerator* cg) { // Queue cg on the separate late-inline list for string operations.
822 _string_late_inlines.push(cg);
823 }
824
825 void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
826
827 void dump_inlining();
828
829 bool over_inlining_cutoff() const { // Has the graph grown past the inlining budget for the current phase?
830 if (!inlining_incrementally()) {
831 return unique() > (uint)NodeCountInliningCutoff; // Parse-time budget: total node count.
832 } else {
833 return live_nodes() > (uint)LiveNodeCountInliningCutoff; // Incremental phase: only live nodes count.
834 }
835 }
836
837 void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
838 void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
839 bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; }
840
841 void inline_incrementally_one(PhaseIterGVN& igvn);
842 void inline_incrementally(PhaseIterGVN& igvn);
843 void inline_string_calls(bool parse_time);
844
845 // Matching, CFG layout, allocation, code generation
846 PhaseCFG* cfg() { return _cfg; }
847 bool select_24_bit_instr() const { return _select_24_bit_instr; }
848 bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
849 bool has_java_calls() const { return _java_calls > 0; }
850 int java_calls() const { return _java_calls; }
851 int inner_loops() const { return _inner_loops; }
852 Matcher* matcher() { return _matcher; }
853 PhaseRegAlloc* regalloc() { return _regalloc; }
854 int frame_slots() const { return _frame_slots; }
855 int frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
856 RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
857 Arena* indexSet_arena() { return _indexSet_arena; }
858 void* indexSet_free_block_list() { return _indexSet_free_block_list; }
859 uint node_bundling_limit() { return _node_bundling_limit; }
860 Bundle* node_bundling_base() { return _node_bundling_base; }
861 void set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
862 void set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
863 bool starts_bundle(const Node *n) const;
896 relocInfo* scratch_locs_memory() { return _scratch_locs_memory; }
897 void set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; }
898
899 // emit to scratch blob, report resulting size
900 uint scratch_emit_size(const Node* n);
901 void set_in_scratch_emit_size(bool x) { _in_scratch_emit_size = x; }
902 bool in_scratch_emit_size() const { return _in_scratch_emit_size; }
903
904 enum ScratchBufferBlob {
905 MAX_inst_size = 1024,
906 MAX_locs_size = 128, // number of relocInfo elements
907 MAX_const_size = 128,
908 MAX_stubs_size = 128
909 };
910
911 // Major entry point. Given a Scope, compile the associated method.
912 // For normal compilations, entry_bci is InvocationEntryBci. For on stack
913 // replacement, entry_bci indicates the bytecode for which to compile a
914 // continuation.
915 Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
916 int entry_bci, bool subsume_loads, bool do_escape_analysis);
917
918 // Second major entry point. From the TypeFunc signature, generate code
919 // to pass arguments from the Java calling convention to the C calling
920 // convention.
921 Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
922 address stub_function, const char *stub_name,
923 int is_fancy_jump, bool pass_tls,
924 bool save_arg_registers, bool return_pc);
925
926 // From the TypeFunc signature, generate code to pass arguments
927 // from Compiled calling convention to Interpreter's calling convention
928 void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);
929
930 // From the TypeFunc signature, generate code to pass arguments
931 // from Interpreter's calling convention to Compiler's calling convention
932 void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);
933
934 // Are we compiling a method?
935 bool has_method() { return method() != NULL; }
936
|
245 Constant add(MachConstantNode* n, jfloat f) { // Add a jfloat constant for node n to the constant table.
246 jvalue value; value.f = f; // Wrap the float in a jvalue union.
247 return add(n, T_FLOAT, value); // Delegate to the BasicType-tagged add() overload.
248 }
249 Constant add(MachConstantNode* n, jdouble d) { // Add a jdouble constant for node n to the constant table.
250 jvalue value; value.d = d; // Wrap the double in a jvalue union.
251 return add(n, T_DOUBLE, value); // Delegate to the BasicType-tagged add() overload.
252 }
253
254 // Jump-table
255 Constant add_jump_table(MachConstantNode* n);
256 void fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
257 };
258
259 private:
260 // Fixed parameters to this compilation.
261 const int _compile_id;
262 const bool _save_argument_registers; // save/restore arg regs for trampolines
263 const bool _subsume_loads; // Load can be matched as part of a larger op.
264 const bool _do_escape_analysis; // Do escape analysis.
265 const bool _eliminate_autobox; // Do autoboxing elimination.
266 ciMethod* _method; // The method being compiled.
267 int _entry_bci; // entry bci for osr methods.
268 const TypeFunc* _tf; // My kind of signature
269 InlineTree* _ilt; // Ditto (temporary).
270 address _stub_function; // VM entry for stub being compiled, or NULL
271 const char* _stub_name; // Name of stub or adapter being compiled, or NULL
272 address _stub_entry_point; // Compile code entry for generated stub, or NULL
273
274 // Control of this compilation.
275 int _num_loop_opts; // Number of iterations for doing loop optimizations
276 int _max_inline_size; // Max inline size for this compilation
277 int _freq_inline_size; // Max hot method inline size for this compilation
278 int _fixed_slots; // count of frame slots not allocated by the register
279 // allocator i.e. locks, original deopt pc, etc.
280 // For deopt
281 int _orig_pc_slot;
282 int _orig_pc_slot_offset_in_bytes;
283
284 int _major_progress; // Count of something big happening
285 bool _inlining_progress; // progress doing incremental inlining?
286 bool _inlining_incrementally;// Are we doing incremental inlining (post parse)
287 bool _has_loops; // True if the method _may_ have some loops
288 bool _has_split_ifs; // True if the method _may_ have some split-if
289 bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores.
290 bool _has_stringbuilder; // True if StringBuffers or StringBuilders are allocated
291 bool _has_boxed_value; // True if a boxed object is allocated
292 int _max_vector_size; // Maximum size of generated vectors
293 uint _trap_hist[trapHistLength]; // Cumulative traps
294 bool _trap_can_recompile; // Have we emitted a recompiling trap?
295 uint _decompile_count; // Cumulative decompilation counts.
296 bool _do_inlining; // True if we intend to do inlining
297 bool _do_scheduling; // True if we intend to do scheduling
298 bool _do_freq_based_layout; // True if we intend to do frequency based block layout
299 bool _do_count_invocations; // True if we generate code to count invocations
300 bool _do_method_data_update; // True if we generate code to update MethodData*s
301 int _AliasLevel; // Locally-adjusted version of AliasLevel flag.
302 bool _print_assembly; // True if we should dump assembly code for this compilation
303 #ifndef PRODUCT
304 bool _trace_opto_output;
305 bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
306 #endif
307
308 // JSR 292
309 bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
310
311 // Compilation environment.
360 Arena* _type_arena; // Alias for _Compile_types except in Initialize_shared()
361 Dict* _type_dict; // Intern table
362 void* _type_hwm; // Last allocation (see Type::operator new/delete)
363 size_t _type_last_size; // Last allocation size (see Type::operator new/delete)
364 ciMethod* _last_tf_m; // Cache for
365 const TypeFunc* _last_tf; // TypeFunc::make
366 AliasType** _alias_types; // List of alias types seen so far.
367 int _num_alias_types; // Logical length of _alias_types
368 int _max_alias_types; // Physical length of _alias_types
369 AliasCacheEntry _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking
370
371 // Parsing, optimization
372 PhaseGVN* _initial_gvn; // Results of parse-time PhaseGVN
373 Unique_Node_List* _for_igvn; // Initial work-list for next round of Iterative GVN
374 WarmCallInfo* _warm_calls; // Sorted work-list for heat-based inlining.
375
376 GrowableArray<CallGenerator*> _late_inlines; // List of CallGenerators to be revisited after
377 // main parsing has finished.
378 GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
379
380 GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations
381
382 int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
383 uint _number_of_mh_late_inlines; // number of method handle late inlining still pending
384
385
386 // Inlining may not happen in parse order which would make
387 // PrintInlining output confusing. Keep track of PrintInlining
388 // pieces in order.
389 class PrintInliningBuffer : public ResourceObj { // One buffered piece of PrintInlining output, kept so pieces can be emitted in parse order.
390 private:
391 CallGenerator* _cg; // Call generator this output piece belongs to (NULL until set_cg).
392 stringStream* _ss; // Stream accumulating the PrintInlining text for this piece.
393 
394 public:
395 PrintInliningBuffer()
396 : _cg(NULL) { _ss = new stringStream(); } // Starts with an empty stream and no call generator attached.
397 
398 stringStream* ss() const { return _ss; } // Accessor: the buffered text stream.
399 CallGenerator* cg() const { return _cg; } // Accessor: the associated call generator (may be NULL).
400 void set_cg(CallGenerator* cg) { _cg = cg; } // Attach the call generator for this piece.
401 };
475 int _scratch_const_size; // For temporary code buffers.
476 bool _in_scratch_emit_size; // true when in scratch_emit_size.
477
478 public:
479 // Accessors
480
481 // The Compile instance currently active in this (compiler) thread.
482 static Compile* current() { // The Compile instance active in this compiler thread, fetched from the thread's ciEnv.
483 return (Compile*) ciEnv::current()->compiler_data(); // compiler_data() holds the active compilation; cast back to Compile*.
484 }
485
486 // ID for this compilation. Useful for setting breakpoints in the debugger.
487 int compile_id() const { return _compile_id; }
488
489 // Does this compilation allow instructions to subsume loads? User
490 // instructions that subsume a load may result in an unschedulable
491 // instruction sequence.
492 bool subsume_loads() const { return _subsume_loads; }
493 // Do escape analysis.
494 bool do_escape_analysis() const { return _do_escape_analysis; }
495 // Do autoboxing elimination.
496 bool eliminate_autobox() const { return _eliminate_autobox; }
497 // Do aggressive boxing elimination.
498 bool aggressive_unboxing() const { return _eliminate_autobox && AggressiveUnboxing; }
499 bool save_argument_registers() const { return _save_argument_registers; }
500
501
502 // Other fixed compilation parameters.
503 ciMethod* method() const { return _method; }
504 int entry_bci() const { return _entry_bci; }
505 bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
506 bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
507 const TypeFunc* tf() const { assert(_tf!=NULL, ""); return _tf; }
508 void init_tf(const TypeFunc* tf) { assert(_tf==NULL, ""); _tf = tf; }
509 InlineTree* ilt() const { return _ilt; }
510 address stub_function() const { return _stub_function; }
511 const char* stub_name() const { return _stub_name; }
512 address stub_entry_point() const { return _stub_entry_point; }
513
514 // Control of this compilation.
515 int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
516 void set_fixed_slots(int n) { _fixed_slots = n; }
517 int major_progress() const { return _major_progress; }
518 void set_inlining_progress(bool z) { _inlining_progress = z; }
519 int inlining_progress() const { return _inlining_progress; }
520 void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
521 int inlining_incrementally() const { return _inlining_incrementally; }
522 void set_major_progress() { _major_progress++; }
523 void clear_major_progress() { _major_progress = 0; }
524 int num_loop_opts() const { return _num_loop_opts; }
525 void set_num_loop_opts(int n) { _num_loop_opts = n; }
526 int max_inline_size() const { return _max_inline_size; }
527 void set_freq_inline_size(int n) { _freq_inline_size = n; }
528 int freq_inline_size() const { return _freq_inline_size; }
529 void set_max_inline_size(int n) { _max_inline_size = n; }
530 bool has_loops() const { return _has_loops; }
531 void set_has_loops(bool z) { _has_loops = z; }
532 bool has_split_ifs() const { return _has_split_ifs; }
533 void set_has_split_ifs(bool z) { _has_split_ifs = z; }
534 bool has_unsafe_access() const { return _has_unsafe_access; }
535 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
536 bool has_stringbuilder() const { return _has_stringbuilder; }
537 void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
538 bool has_boxed_value() const { return _has_boxed_value; }
539 void set_has_boxed_value(bool z) { _has_boxed_value = z; }
540 int max_vector_size() const { return _max_vector_size; }
541 void set_max_vector_size(int s) { _max_vector_size = s; }
542 void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
543 uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
544 bool trap_can_recompile() const { return _trap_can_recompile; }
545 void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
546 uint decompile_count() const { return _decompile_count; }
547 void set_decompile_count(uint c) { _decompile_count = c; }
548 bool allow_range_check_smearing() const;
549 bool do_inlining() const { return _do_inlining; }
550 void set_do_inlining(bool z) { _do_inlining = z; }
551 bool do_scheduling() const { return _do_scheduling; }
552 void set_do_scheduling(bool z) { _do_scheduling = z; }
553 bool do_freq_based_layout() const{ return _do_freq_based_layout; }
554 void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
555 bool do_count_invocations() const{ return _do_count_invocations; }
556 void set_do_count_invocations(bool z){ _do_count_invocations = z; }
557 bool do_method_data_update() const { return _do_method_data_update; }
558 void set_do_method_data_update(bool z) { _do_method_data_update = z; }
559 int AliasLevel() const { return _AliasLevel; }
572 // JSR 292
573 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
574 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
575
576 void begin_method() { // Notify the attached printer (if any) that a method compilation is starting; no-op in PRODUCT builds.
577 #ifndef PRODUCT
578 if (_printer) _printer->begin_method(this);
579 #endif
580 }
581 void print_method(const char * name, int level = 1) { // Emit a named snapshot of the current graph via the printer; no-op in PRODUCT builds.
582 #ifndef PRODUCT
583 if (_printer) _printer->print_method(this, name, level); // 'level' controls printer verbosity/filtering.
584 #endif
585 }
586 void end_method() { // Notify the attached printer (if any) that this method compilation is done; no-op in PRODUCT builds.
587 #ifndef PRODUCT
588 if (_printer) _printer->end_method();
589 #endif
590 }
591
592 int macro_count() const { return _macro_nodes->length(); }
593 int predicate_count() const { return _predicate_opaqs->length();}
594 int expensive_count() const { return _expensive_nodes->length(); }
595 Node* macro_node(int idx) const { return _macro_nodes->at(idx); }
596 Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);}
597 Node* expensive_node(int idx) const { return _expensive_nodes->at(idx); }
598 ConnectionGraph* congraph() { return _congraph;}
599 void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;}
600 void add_macro_node(Node * n) { // Register n on the macro-expansion worklist; each node may appear at most once.
601 //assert(n->is_macro(), "must be a macro node");
602 assert(!_macro_nodes->contains(n), " duplicate entry in expand list"); // Duplicates would cause double expansion.
603 _macro_nodes->append(n);
604 }
605 void remove_macro_node(Node * n) { // Drop n from the macro worklist and, if present, from the predicate-opaque list too.
606 // this function may be called twice for a node so check
607 // that the node is in the array before attempting to remove it
608 if (_macro_nodes->contains(n))
609 _macro_nodes->remove(n);
610 // remove from _predicate_opaqs list also if it is there
611 if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
612 _predicate_opaqs->remove(n);
613 }
614 }
615 void add_expensive_node(Node * n);
616 void remove_expensive_node(Node * n) {
617 if (_expensive_nodes->contains(n)) {
759 _last_tf = tf;
760 }
761
762 AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
763 AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
764 bool have_alias_type(const TypePtr* adr_type);
765 AliasType* alias_type(ciField* field);
766
767 int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
768 const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
769 int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }
770
771 // Building nodes
772 void rethrow_exceptions(JVMState* jvms);
773 void return_values(JVMState* jvms);
774 JVMState* build_start_state(StartNode* start, const TypeFunc* tf);
775
776 // Decide how to build a call.
777 // The profile factor is a discount to apply to this site's interp. profile.
778 CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
779 bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) { // True if this call should be deferred to a late-inlining pass for either reason below.
780 return should_delay_string_inlining(call_method, jvms) ||
781 should_delay_boxing_inlining(call_method, jvms); // String-concat or boxing candidates are inlined late.
782 }
783 bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
784 bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
785
786 // Helper functions to identify inlining potential at call-site
787 ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
788 ciMethod* callee, const TypeOopPtr* receiver_type,
789 bool is_virtual,
790 bool &call_does_dispatch, int &vtable_index);
791 ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
792 ciMethod* callee, const TypeOopPtr* receiver_type);
793
// --- Deoptimization-profile queries (bodies elsewhere) ---
794 // Report if there were too many traps at a current method and bci.
795 // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
796 // If there is no MDO at all, report no trap unless told to assume it.
797 bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
798 // This version, unspecific to a particular bci, asks if
799 // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
800 bool too_many_traps(Deoptimization::DeoptReason reason,
801 // Privately used parameter for logging:
802 ciMethodData* logmd = NULL);
803 // Report if there were too many recompiles at a method and bci.
804 bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
// Prune nodes that are not on the 'useful' worklist (body elsewhere).
820 void remove_useless_nodes (Unique_Node_List &useful);
821 
// Accessors for the list of warm (maybe-inline) calls; pop_warm_call
// removes and returns the next entry (body elsewhere).
822 WarmCallInfo* warm_calls() const { return _warm_calls; }
823 void set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
824 WarmCallInfo* pop_warm_call();
825
826 // Record this CallGenerator for inlining at the end of parsing.
827 void add_late_inline(CallGenerator* cg) {
828 _late_inlines.insert_before(_late_inlines_pos, cg);
829 _late_inlines_pos++;
830 }
831
832 void prepend_late_inline(CallGenerator* cg) {
833 _late_inlines.insert_before(0, cg);
834 }
835
836 void add_string_late_inline(CallGenerator* cg) {
837 _string_late_inlines.push(cg);
838 }
839
840 void add_boxing_late_inline(CallGenerator* cg) {
841 _boxing_late_inlines.push(cg);
842 }
843
// Drop queued late inlines whose call nodes are no longer on the
// 'useful' list (body elsewhere).
844 void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
845 
// Print a report of inlining decisions (body elsewhere).
846 void dump_inlining();
847
848 bool over_inlining_cutoff() const {
849 if (!inlining_incrementally()) {
850 return unique() > (uint)NodeCountInliningCutoff;
851 } else {
852 return live_nodes() > (uint)LiveNodeCountInliningCutoff;
853 }
854 }
855
856 void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
857 void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
858 bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; }
859
860 bool has_late_inline() const { return ((_late_inlines.length() + _string_late_inlines.length()) > 0); }
// Drivers for the incremental-inlining passes (bodies elsewhere):
// one round, the full fixed-point loop, and the specialized passes
// for deferred string-concat and autoboxing call sites.
861 void inline_incrementally_one(PhaseIterGVN& igvn);
862 void inline_incrementally(PhaseIterGVN& igvn);
863 void inline_string_calls(bool parse_time);
864 void inline_boxing_calls(PhaseIterGVN& igvn);
865
866 // Matching, CFG layout, allocation, code generation
// Simple accessors over back-end state.  Getters return the cached
// field; setters store their argument; no other side effects.
867 PhaseCFG* cfg() { return _cfg; }
// x86 FPU precision mode flags (see the corresponding _select/_in fields).
868 bool select_24_bit_instr() const { return _select_24_bit_instr; }
869 bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
// Count of Java calls emitted so far; has_java_calls() is the nonzero test.
870 bool has_java_calls() const { return _java_calls > 0; }
871 int java_calls() const { return _java_calls; }
872 int inner_loops() const { return _inner_loops; }
873 Matcher* matcher() { return _matcher; }
874 PhaseRegAlloc* regalloc() { return _regalloc; }
875 int frame_slots() const { return _frame_slots; }
876 int frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
877 RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
// Arena and free-block list backing IndexSet allocation.
878 Arena* indexSet_arena() { return _indexSet_arena; }
879 void* indexSet_free_block_list() { return _indexSet_free_block_list; }
// Instruction-bundling table used by the scheduler (body of
// starts_bundle is elsewhere).
880 uint node_bundling_limit() { return _node_bundling_limit; }
881 Bundle* node_bundling_base() { return _node_bundling_base; }
882 void set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
883 void set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
884 bool starts_bundle(const Node *n) const;
// Relocation-info storage for the scratch buffer used when sizing
// instruction encodings.
917 relocInfo* scratch_locs_memory() { return _scratch_locs_memory; }
918 void set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; }
919 
920 // emit to scratch blob, report resulting size
921 uint scratch_emit_size(const Node* n);
// Flag marking that we are inside a scratch_emit_size() measurement.
922 void set_in_scratch_emit_size(bool x) { _in_scratch_emit_size = x; }
923 bool in_scratch_emit_size() const { return _in_scratch_emit_size; }
924
925 enum ScratchBufferBlob {
926 MAX_inst_size = 1024,
927 MAX_locs_size = 128, // number of relocInfo elements
928 MAX_const_size = 128,
929 MAX_stubs_size = 128
930 };
931
932 // Major entry point.  Given a Scope, compile the associated method.
933 // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
934 // replacement, entry_bci indicates the bytecode for which to compile a
935 // continuation.
// subsume_loads / do_escape_analysis / eliminate_boxing select the
// optional optimizations for this compilation (stored in the const
// fields declared above).
936 Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
937 int entry_bci, bool subsume_loads, bool do_escape_analysis,
938 bool eliminate_boxing);
939 
940 // Second major entry point.  From the TypeFunc signature, generate code
941 // to pass arguments from the Java calling convention to the C calling
942 // convention.
// 'gen' produces the signature; stub_function/stub_name identify the
// VM entry being wrapped (see _stub_function/_stub_name above).
943 Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
944 address stub_function, const char *stub_name,
945 int is_fancy_jump, bool pass_tls,
946 bool save_arg_registers, bool return_pc);
947
948 // From the TypeFunc signature, generate code to pass arguments
949 // from Compiled calling convention to Interpreter's calling convention
950 void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);
951 
952 // From the TypeFunc signature, generate code to pass arguments
953 // from Interpreter's calling convention to Compiler's calling convention
// (inverse adapter of the one above; bodies elsewhere)
954 void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);
955
956 // Are we compiling a method?
957 bool has_method() { return method() != NULL; }
958
|