/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class Block;
class Bundle;
class C2Compiler;
class CallGenerator;
class ConnectionGraph;
class InlineTree;
class Int_Array;
class Matcher;
class MachNode;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class TypeData;
class TypePtr;
class TypeFunc;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };

  // Variant of TraceTime(NULL, &_t_accumulator, TimeCompiler);
  // Integrated with logging.  If logging is turned on, and dolog is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on TimeCompiler.)
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator, bool dolog);
    ~TracePhase();
  };
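
  // Illustrative sketch (not part of this interface): a phase typically wraps
  // its work in a TracePhase so that time is accumulated under
  // -XX:+TimeCompiler and, when a CompileLog is attached, phase brackets with
  // time stamps and node counts appear in the log.  The phase name and timer
  // below are hypothetical examples.
  //
  //   {
  //     TracePhase t("myPhase", &_t_myPhaseTimer, /*dolog*/ true);
  //     ... do the work of the phase ...
  //   }  // destructor stops the timer and closes the log bracket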

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final())  _is_rewritable = false;
    }

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1 << logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = methodDataOopDesc::_trap_hist_limit
  };

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers; // save/restore arg regs for trampolines
  const bool            _subsume_loads;         // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;    // Do escape analysis.
  ciMethod*             _method;                // The method being compiled.
  int                   _entry_bci;             // entry bci for osr methods.
  const TypeFunc*       _tf;                    // My kind of signature
  InlineTree*           _ilt;                   // Ditto (temporary).
  address               _stub_function;         // VM entry for stub being compiled, or NULL
  const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;      // Compiled code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _num_loop_opts;         // Number of iterations for doing loop optimizations
  int                   _max_inline_size;       // Max inline size for this compilation
  int                   _freq_inline_size;      // Max hot method inline size for this compilation
  int                   _fixed_slots;           // count of frame slots not allocated by the register
                                                // allocator, i.e. locks, original deopt pc, etc.
  // For deopt
  int                   _orig_pc_slot;
  int                   _orig_pc_slot_offset_in_bytes;

  int                   _major_progress;        // Count of something big happening
  bool                  _deopt_happens;         // TRUE if de-optimization CAN happen
  bool                  _has_loops;             // True if the method _may_ have some loops
  bool                  _has_split_ifs;         // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
  uint                  _decompile_count;       // Cumulative decompilation counts.
  bool                  _do_inlining;           // True if we intend to do inlining
  bool                  _do_scheduling;         // True if we intend to do scheduling
  bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;  // True if we generate code to count invocations
  bool                  _do_method_data_update; // True if we generate code to update methodDataOops
  int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;        // True if we should dump assembly code for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif

  // Compilation environment.
  Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
  ciEnv*                _env;                   // CI interface
  CompileLog*           _log;                   // from CompilerThread
  const char*           _failure_reason;        // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
#endif

  // Node management
  uint                  _unique;                // Counter for unique Node indices
  debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;            // Arena for new-space Nodes
  Arena                 _old_arena;             // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                  // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                   // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;      // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1 << _log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;    // default notes for new nodes
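  // Conceptual sketch (the lookup itself lives elsewhere): the notes for the
  // node with index i are kept in block (i >> _log2_node_notes_block_size) of
  // _node_note_array, at offset (i & (_node_notes_block_size - 1)) within that
  // block, so the array grows by whole blocks as nodes are created.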

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;         // Arena for all types
  Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;             // Intern table
  void*                 _type_hwm;              // Last allocation (see Type::operator new/delete)
  size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;             // Cache for
  const TypeFunc*       _last_tf;               //  TypeFunc::make
  AliasType**           _alias_types;           // List of alias types seen so far.
  int                   _num_alias_types;       // Logical length of _alias_types
  int                   _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
                                                // main parsing has finished.

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                   // Results of CFG finding
  bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
  bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
  int                   _java_calls;            // Number of java calls in the method
  int                   _inner_loops;           // Number of inner loops in the method
  Matcher*              _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;              // Results of register allocation.
  int                   _frame_slots;           // Size of total frame in stack slots
  CodeOffsets           _code_offsets;          // Offsets into the code for various interesting entries
  RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks

  uint                  _node_bundling_limit;
  Bundle*               _node_bundling_base;    // Information for instruction bundling

  // Instruction bits passed off to the VM
  int                   _method_size;           // Size of nmethod code segment in bytes
  CodeBuffer            _code_buffer;           // Where the code is assembled
  int                   _first_block_size;      // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;         // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;            // Table of implicit null checks in native code
  OopMapSet*            _oop_map_set;           // Table of oop maps (one for each safepoint location)
  static int            _CompiledZap_count;     // counter compared against CompileZap[First/Last]
  BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
  relocInfo*            _scratch_locs_memory;   // For temporary code buffers.

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int compile_id() const                  { return _compile_id; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const              { return _subsume_loads; }
  // Do escape analysis.
  bool do_escape_analysis() const         { return _do_escape_analysis; }
  bool save_argument_registers() const    { return _save_argument_registers; }


  // Other fixed compilation parameters.
  ciMethod*         method() const        { return _method; }
  int               entry_bci() const     { return _entry_bci; }
  bool              is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc*   tf() const            { assert(_tf != NULL, ""); return _tf; }
  void          init_tf(const TypeFunc* tf) { assert(_tf == NULL, ""); _tf = tf; }
  InlineTree*       ilt() const           { return _ilt; }
  address           stub_function() const { return _stub_function; }
  const char*       stub_name() const     { return _stub_name; }
  address           stub_entry_point() const { return _stub_entry_point; }
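
  // Illustrative sketch: code that runs inside a compilation but is not handed
  // a Compile* can reach the active instance through current(), e.g.
  //
  //   Compile* C = Compile::current();
  //   if (C->failing())  return;   // respect a previously recorded bailout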

  // Control of this compilation.
  int               fixed_slots() const   { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void          set_fixed_slots(int n)    { _fixed_slots = n; }
  int               major_progress() const { return _major_progress; }
  void          set_major_progress()      { _major_progress++; }
  void        clear_major_progress()      { _major_progress = 0; }
  int               num_loop_opts() const { return _num_loop_opts; }
  void          set_num_loop_opts(int n)  { _num_loop_opts = n; }
  int               max_inline_size() const { return _max_inline_size; }
  void          set_freq_inline_size(int n) { _freq_inline_size = n; }
  int               freq_inline_size() const { return _freq_inline_size; }
  void          set_max_inline_size(int n) { _max_inline_size = n; }
  bool              deopt_happens() const { return _deopt_happens; }
  bool              has_loops() const     { return _has_loops; }
  void          set_has_loops(bool z)     { _has_loops = z; }
  bool              has_split_ifs() const { return _has_split_ifs; }
  void          set_has_split_ifs(bool z) { _has_split_ifs = z; }
  bool              has_unsafe_access() const { return _has_unsafe_access; }
  void          set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
  bool              has_stringbuilder() const { return _has_stringbuilder; }
  void          set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
  void          set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint              trap_count(uint r) const   { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool              trap_can_recompile() const { return _trap_can_recompile; }
  void          set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
  uint              decompile_count() const    { return _decompile_count; }
  void          set_decompile_count(uint c)    { _decompile_count = c; }
  bool              allow_range_check_smearing() const;
  bool              do_inlining() const   { return _do_inlining; }
  void          set_do_inlining(bool z)   { _do_inlining = z; }
  bool              do_scheduling() const { return _do_scheduling; }
  void          set_do_scheduling(bool z) { _do_scheduling = z; }
  bool              do_freq_based_layout() const { return _do_freq_based_layout; }
  void          set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool              do_count_invocations() const { return _do_count_invocations; }
  void          set_do_count_invocations(bool z) { _do_count_invocations = z; }
  bool              do_method_data_update() const { return _do_method_data_update; }
  void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
  int               AliasLevel() const    { return _AliasLevel; }
  bool              print_assembly() const { return _print_assembly; }
  void          set_print_assembly(bool z) { _print_assembly = z; }
  // check the CompilerOracle for special behaviours for this compile
  bool          method_has_option(const char * option) {
    return method() != NULL && method()->has_option(option);
  }
#ifndef PRODUCT
  bool          trace_opto_output() const { return _trace_opto_output; }
  bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
#endif

  void begin_method() {
#ifndef PRODUCT
    if (_printer) _printer->begin_method(this);
#endif
  }
  void print_method(const char * name, int level = 1) {
#ifndef PRODUCT
    if (_printer) _printer->print_method(this, name, level);
#endif
  }
  void end_method() {
#ifndef PRODUCT
    if (_printer) _printer->end_method();
#endif
  }

  int           macro_count()             { return _macro_nodes->length(); }
  int           predicate_count()         { return _predicate_opaqs->length(); }
  Node*         macro_node(int idx)       { return _macro_nodes->at(idx); }
  Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx); }
  ConnectionGraph* congraph()             { return _congraph; }
  void add_macro_node(Node * n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node * n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)) {
      _predicate_opaqs->remove(n);
    }
  }
  void add_predicate_opaq(Node * n) {
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }
  // Remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
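
  // Illustrative sketch (the real driver lives in the macro-expansion phase):
  // expansion consumes this worklist from the back, and each expansion is
  // expected to take the node off the list via remove_macro_node(), e.g.
  //
  //   while (C->macro_count() > 0) {
  //     Node* n = C->macro_node(C->macro_count() - 1);
  //     ... expand n; the expansion calls remove_macro_node(n) ...
  //   }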

  // Compilation environment.
  Arena*      comp_arena()                { return &_comp_arena; }
  ciEnv*      env() const                 { return _env; }
  CompileLog* log() const                 { return _log; }
  bool        failing() const             { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason()            { return _failure_reason; }
  bool        failure_reason_is(const char* r) { return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0); }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason, bool all_tiers = false) {
    // All bailouts cover "all_tiers" when TieredCompilation is off.
    if (!TieredCompilation) all_tiers = true;
    env()->record_method_not_compilable(reason, all_tiers);
    // Record failure reason.
    record_failure(reason);
  }
  void record_method_not_compilable_all_tiers(const char* reason) {
    record_method_not_compilable(reason, true);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (unique() + margin > (uint)MaxNodeLimit) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
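
  // Illustrative sketch: phases that create many nodes typically guard against
  // runaway growth before continuing; the margin below is a hypothetical
  // estimate of how many nodes the next step might add.
  //
  //   if (C->check_node_count(estimated_new_nodes, "out of nodes"))  return;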

  // Node management
  uint              unique() const        { return _unique; }
  uint         next_unique()              { return _unique++; }
  void          set_unique(uint i)        { _unique = i; }
  static int        debug_idx()           { return debug_only(_debug_idx)+0; }
  static void   set_debug_idx(int i)      { debug_only(_debug_idx = i); }
  Arena*            node_arena()          { return &_node_arena; }
  Arena*            old_arena()           { return &_old_arena; }
  RootNode*         root() const          { return _root; }
  void          set_root(RootNode* r)     { _root = r; }
  StartNode*        start() const;        // (Derived from root.)
  void         init_start(StartNode* s);
  Node*             immutable_memory();

  Node*             recent_alloc_ctl() const { return _recent_alloc_ctl; }
  Node*             recent_alloc_obj() const { return _recent_alloc_obj; }
  void          set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }

  // Handy undefined Node
  Node*             top() const           { return _top; }

  // These are used by callers that need to know about creation and transformation of top:
  Node*             cached_top_node()     { return _top; }
  void          set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const  { return _default_node_notes; }
  void    set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes*       node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool   set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena*            type_arena()          { return _type_arena; }
  Dict*             type_dict()           { return _type_dict; }
  void*             type_hwm()            { return _type_hwm; }
  size_t            type_last_size()      { return _type_last_size; }
  int               num_alias_types()     { return _num_alias_types; }

  void          init_type_arena()         { _type_arena = &_Compile_types; }
  void          set_type_arena(Arena* a)  { _type_arena = a; }
  void          set_type_dict(Dict* d)    { _type_dict = d; }
  void          set_type_hwm(void* p)     { _type_hwm = p; }
  void          set_type_last_size(size_t sz) { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }

  AliasType*        alias_type(int idx)   { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*        alias_type(const TypePtr* adr_type) { return find_alias_type(adr_type, false); }
  bool         have_alias_type(const TypePtr* adr_type);
  AliasType*        alias_type(ciField* field);

  int               get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
  const TypePtr*    get_adr_type(uint aidx)            { return alias_type(aidx)->adr_type(); }
  int               get_general_index(uint aidx)       { return alias_type(aidx)->general_index(); }
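
  // Illustrative sketch: memory-slicing clients normally go from a normalized
  // address type to a small integer alias index, which then selects the slice
  // of a MergeMemNode; "adr" here is a hypothetical address type.
  //
  //   const TypePtr* adr = ...;                 // address type of the access
  //   int alias_idx = C->get_alias_index(adr);  // e.g. AliasIdxRaw for raw memory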

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);

  // Parsing, optimization
  PhaseGVN*         initial_gvn()         { return _initial_gvn; }
  Unique_Node_List* for_igvn()            { return _for_igvn; }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
  void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
  void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);


  void              identify_useful_nodes(Unique_Node_List &useful);
  void              remove_useless_nodes (Unique_Node_List &useful);

  WarmCallInfo*     warm_calls() const    { return _warm_calls; }
  void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                 { return _cfg; }
  bool              select_24_bit_instr() const { return _select_24_bit_instr; }
  bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()             { return _matcher; }
  PhaseRegAlloc*    regalloc()            { return _regalloc; }
  int               frame_slots() const   { return _frame_slots; }
  int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
  RegMask&          FIRST_STACK_mask()    { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()      { return _indexSet_arena; }
  void*             indexSet_free_block_list() { return _indexSet_free_block_list; }
  uint              node_bundling_limit() { return _node_bundling_limit; }
  Bundle*           node_bundling_base()  { return _node_bundling_base; }
  void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
  void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
  bool          starts_bundle(const Node *n) const;
  bool          need_stack_bang(int frame_size_in_bytes) const;
  bool          need_register_stack_bang() const;

  void          set_matcher(Matcher* m)              { _matcher = m; }
  //void        set_regalloc(PhaseRegAlloc* ra)        { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)          { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode   = mode;
  }

  void  set_java_calls(int z)  { _java_calls  = z; }
  void set_inner_loops(int z)  { _inner_loops = z; }

  // Instruction bits passed off to the VM
  int               code_size()           { return _method_size; }
  CodeBuffer*       code_buffer()         { return &_code_buffer; }
  int               first_block_size()    { return _first_block_size; }
  void          set_frame_complete(int off) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
  ExceptionHandlerTable*  handler_table() { return &_handler_table; }
  ImplicitExceptionTable* inc_table()     { return &_inc_table; }
  OopMapSet*        oop_map_set()         { return _oop_map_set; }
  DebugInformationRecorder* debug_info()  { return env()->debug_info(); }
  Dependencies*     dependencies()        { return env()->dependencies(); }
  static int        CompiledZap_count()   { return _CompiledZap_count; }
  BufferBlob*       scratch_buffer_blob() { return _scratch_buffer_blob; }
  void         init_scratch_buffer_blob();
  void          set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo*        scratch_locs_memory() { return _scratch_locs_memory; }
  void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }

  // emit to scratch blob, report resulting size
  uint              scratch_emit_size(const Node* n);

  enum ScratchBufferBlob {
    MAX_inst_size   = 1024,
    MAX_locs_size   = 128,   // number of relocInfo elements
    MAX_const_size  = 128,
    MAX_stubs_size  = 128
  };

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc);
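
  // Illustrative sketch (a caller such as C2Compiler might drive a normal
  // compilation roughly like this; names are taken from the constructor
  // parameters above): simply constructing a Compile on the compiler thread's
  // stack runs the whole pipeline.
  //
  //   Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
  //   // afterwards, check C.failing() / the ciEnv for the outcome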

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Driver for converting compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node *n);
  bool valid_bundle_info(const Node *n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();

  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray( int idx, MachSafePointNode* sfpt,
                     Node *local, GrowableArray<ScopeValue*> *array,
                     GrowableArray<ScopeValue*> *objs );

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
  static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
                                     ObjectValue* sv );

  // Process an OopMap Element while emitting nodes
  void Process_OopMap_Node(MachNode *mach, int code_offset);

  // Write out basic block data to code buffer
  void Fill_buffer();

  // Determine which variable sized branches can be shortened
  void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size);

  // Compute the size of the first NumberOfLoopInstrToAlign instructions
  // at the head of a loop.
  void compute_loop_first_inst_sizes();

  // Compute the information for the exception tables
  void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

#ifdef ENABLE_ZAP_DEAD_LOCALS
  static bool is_node_getting_a_safepoint(Node*);
  void Insert_zap_nodes();
  Node* call_zap_node(MachSafePointNode* n, int block_no);
#endif

 private:
  // Phase control:
  void Init(int aliaslevel);         // Prepare for a single compilation
  int  Inline_Warm();                // Find more inlining work.
  void Finish_Warm();                // Give up on further inlines.
  void Optimize();                   // Given a graph, optimize it
  void Code_Gen();                   // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  void register_library_intrinsics();                             // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual); // constructor
  int intrinsic_insertion_index(ciMethod* m, bool is_virtual);    // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);    // query fn
  void           register_intrinsic(CallGenerator* cg);           // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked   = 1,    // succeeded at least once
    _intrinsic_failed   = 2,    // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual  = 8,    // was seen in the virtual form (rare)
    _intrinsic_both     = 16    // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return true if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges.
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // Print bytecodes, including the scope inlining tree
  void print_codes();

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Dump formatted assembly
  void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
  void dump_pc(int *pcs, int pc_limit, Node *n);

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();
};