/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_COMPILE_HPP
#define SHARE_VM_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "compiler/compilerOracle.hpp"
#include "libadt/dict.hpp"
#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/vmThread.hpp"

class Block;
class Bundle;
class C2Compiler;
class CallGenerator;
class ConnectionGraph;
class InlineTree;
class Int_Array;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class TypeData;
class TypePtr;
class TypeFunc;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;
class Node_Stack;
struct Final_Reshape_Counts;

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };

  // Variant of TraceTime(NULL, &_t_accumulator, TimeCompiler);
  // Integrated with logging.  If logging is turned on, and dolog is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on TimeCompiler.)
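  //
  // Illustrative usage sketch (the timer name below is hypothetical, shown
  // only to demonstrate the RAII pattern this class follows; cf. the actual
  // call sites in compile.cpp):
  //
  //   { TracePhase t2("optimizer", &some_elapsed_timer, true /*dolog*/);
  //     ... work performed in this phase ...
  //   }  // destructor stops the timer and closes the log bracket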
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
    const char* _phase_name;
    bool        _dolog;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator, bool dolog);
    ~TracePhase();
  };

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final())  _is_rewritable = false;
    }

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1<<logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };

  // Constant entry of the constant table.
  class Constant {
   private:
    BasicType _type;
    union {
      jvalue    _value;
      Metadata* _metadata;
    } _v;
    int       _offset;         // offset of this constant (in bytes) relative to the constant table base.
    float     _freq;
    bool      _can_be_reused;  // true (default) if the value can be shared with other users.

   public:
    Constant() : _type(T_ILLEGAL), _offset(-1), _freq(0.0f), _can_be_reused(true) { _v._value.l = 0; }
    Constant(BasicType type, jvalue value, float freq = 0.0f, bool can_be_reused = true) :
      _type(type),
      _offset(-1),
      _freq(freq),
      _can_be_reused(can_be_reused)
    {
      assert(type != T_METADATA, "wrong constructor");
      _v._value = value;
    }
    Constant(Metadata* metadata, bool can_be_reused = true) :
      _type(T_METADATA),
      _offset(-1),
      _freq(0.0f),
      _can_be_reused(can_be_reused)
    {
      _v._metadata = metadata;
    }

    bool operator==(const Constant& other);

    BasicType type()      const    { return _type; }

    jlong   get_jlong()   const    { return _v._value.j; }
    jfloat  get_jfloat()  const    { return _v._value.f; }
    jdouble get_jdouble() const    { return _v._value.d; }
    jobject get_jobject() const    { return _v._value.l; }

    Metadata* get_metadata() const { return _v._metadata; }

    int         offset()  const    { return _offset; }
    void    set_offset(int offset) {        _offset = offset; }

    float       freq()    const    { return _freq; }
    void    inc_freq(float freq)   {        _freq += freq; }

    bool    can_be_reused() const  { return _can_be_reused; }
  };

  // Constant table.
  class ConstantTable {
   private:
    GrowableArray<Constant> _constants;          // Constants of this table.
    int                     _size;               // Size in bytes the emitted constant table takes (including padding).
    int                     _table_base_offset;  // Offset of the table base that gets added to the constant offsets.
    int                     _nof_jump_tables;    // Number of jump-tables in this constant table.

    static int qsort_comparator(Constant* a, Constant* b);

    // We use negative frequencies to keep the order of the
    // jump-tables in which they were added.  Otherwise we get into
    // trouble with relocation.
    float next_jump_table_freq() { return -1.0f * (++_nof_jump_tables); }

   public:
    ConstantTable() :
      _size(-1),
      _table_base_offset(-1),  // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
      _nof_jump_tables(0)
    {}

    int size() const { assert(_size != -1, "not calculated yet"); return _size; }

    int calculate_table_base_offset() const;  // AD specific
    void set_table_base_offset(int x)  { assert(_table_base_offset == -1 || x == _table_base_offset, "can't change"); _table_base_offset = x; }
    int      table_base_offset() const { assert(_table_base_offset != -1, "not set yet");                             return _table_base_offset; }

    void emit(CodeBuffer& cb);

    // Returns the offset of the last entry (the top) of the constant table.
    int  top_offset() const { assert(_constants.top().offset() != -1, "not bound yet"); return _constants.top().offset(); }

    void calculate_offsets_and_size();
    int  find_offset(Constant& con) const;

    void     add(Constant& con);
    Constant add(MachConstantNode* n, BasicType type, jvalue value);
    Constant add(Metadata* metadata);
    Constant add(MachConstantNode* n, MachOper* oper);
    Constant add(MachConstantNode* n, jfloat f) {
      jvalue value; value.f = f;
      return add(n, T_FLOAT, value);
    }
    Constant add(MachConstantNode* n, jdouble d) {
      jvalue value; value.d = d;
      return add(n, T_DOUBLE, value);
    }

    // Jump-table
    Constant add_jump_table(MachConstantNode* n);
    void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
  };

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers; // save/restore arg regs for trampolines
  const bool            _subsume_loads;         // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;    // Do escape analysis.
  ciMethod*             _method;                // The method being compiled.
  int                   _entry_bci;             // entry bci for osr methods.
  const TypeFunc*       _tf;                    // My kind of signature
  InlineTree*           _ilt;                   // Ditto (temporary).
  address               _stub_function;         // VM entry for stub being compiled, or NULL
  const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;      // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _num_loop_opts;         // Number of iterations for doing loop optimizations
  int                   _max_inline_size;       // Max inline size for this compilation
  int                   _freq_inline_size;      // Max hot method inline size for this compilation
  int                   _fixed_slots;           // count of frame slots not allocated by the register
                                                // allocator, i.e. locks, original deopt pc, etc.
  // For deopt
  int                   _orig_pc_slot;
  int                   _orig_pc_slot_offset_in_bytes;

  int                   _major_progress;        // Count of something big happening
  bool                  _has_loops;             // True if the method _may_ have some loops
  bool                  _has_split_ifs;         // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
  int                   _max_vector_size;       // Maximum size of generated vectors
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
  uint                  _decompile_count;       // Cumulative decompilation counts.
  bool                  _do_inlining;           // True if we intend to do inlining
  bool                  _do_scheduling;         // True if we intend to do scheduling
  bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;  // True if we generate code to count invocations
  bool                  _do_method_data_update; // True if we generate code to update MethodData*s
  int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;        // True if we should dump assembly code for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif

  // JSR 292
  bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.

  // Compilation environment.
  Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
  ciEnv*                _env;                   // CI interface
  CompileLog*           _log;                   // from CompilerThread
  const char*           _failure_reason;        // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
#endif

  // Node management
  uint                  _unique;                // Counter for unique Node indices
  VectorSet             _dead_node_list;        // Set of dead nodes
  uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
                                                // So use this to keep count and make the call O(1).
  debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;            // Arena for new-space Nodes
  Arena                 _old_arena;             // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                  // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                   // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;      // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Constant table
  ConstantTable         _constant_table;        // The constant table for this compile.
  MachConstantBaseNode* _mach_constant_base_node;  // Constant table base node singleton.


  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;    // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;         // Arena for all types
  Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;             // Intern table
  void*                 _type_hwm;              // Last allocation (see Type::operator new/delete)
  size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;             // Cache for
  const TypeFunc*       _last_tf;               //  TypeFunc::make
  AliasType**           _alias_types;           // List of alias types seen so far.
  int                   _num_alias_types;       // Logical length of _alias_types
  int                   _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize];  // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
                                                // main parsing has finished.

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                   // Results of CFG finding
  bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
  bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
  int                   _java_calls;            // Number of java calls in the method
  int                   _inner_loops;           // Number of inner loops in the method
  Matcher*              _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;              // Results of register allocation.
  int                   _frame_slots;           // Size of total frame in stack slots
  CodeOffsets           _code_offsets;          // Offsets into the code for various interesting entries
  RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks

  uint                  _node_bundling_limit;
  Bundle*               _node_bundling_base;    // Information for instruction bundling

  // Instruction bits passed off to the VM
  int                   _method_size;           // Size of nmethod code segment in bytes
  CodeBuffer            _code_buffer;           // Where the code is assembled
  int                   _first_block_size;      // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;         // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;            // Table of implicit null checks in native code
  OopMapSet*            _oop_map_set;           // Table of oop maps (one for each safepoint location)
  static int            _CompiledZap_count;     // counter compared against CompileZap[First/Last]
  BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
  relocInfo*            _scratch_locs_memory;   // For temporary code buffers.
  int                   _scratch_const_size;    // For temporary code buffers.
  bool                  _in_scratch_emit_size;  // true when in scratch_emit_size.

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int               compile_id() const          { return _compile_id; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool              subsume_loads() const       { return _subsume_loads; }
  // Do escape analysis.
  bool              do_escape_analysis() const  { return _do_escape_analysis; }
  bool              save_argument_registers() const { return _save_argument_registers; }


  // Other fixed compilation parameters.
  ciMethod*         method() const              { return _method; }
  int               entry_bci() const           { return _entry_bci; }
  bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
  bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc*   tf() const                  { assert(_tf != NULL, ""); return _tf; }
  void         init_tf(const TypeFunc* tf)      { assert(_tf == NULL, ""); _tf = tf; }
  InlineTree*       ilt() const                 { return _ilt; }
  address           stub_function() const       { return _stub_function; }
  const char*       stub_name() const           { return _stub_name; }
  address           stub_entry_point() const    { return _stub_entry_point; }

  // Control of this compilation.
  int               fixed_slots() const         { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void          set_fixed_slots(int n)          { _fixed_slots = n; }
  int               major_progress() const      { return _major_progress; }
  void          set_major_progress()            { _major_progress++; }
  void        clear_major_progress()            { _major_progress = 0; }
  int               num_loop_opts() const       { return _num_loop_opts; }
  void          set_num_loop_opts(int n)        { _num_loop_opts = n; }
  int               max_inline_size() const     { return _max_inline_size; }
  void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
  int               freq_inline_size() const    { return _freq_inline_size; }
  void          set_max_inline_size(int n)      { _max_inline_size = n; }
  bool              has_loops() const           { return _has_loops; }
  void          set_has_loops(bool z)           { _has_loops = z; }
  bool              has_split_ifs() const       { return _has_split_ifs; }
  void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
  bool              has_unsafe_access() const   { return _has_unsafe_access; }
  void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
  bool              has_stringbuilder() const   { return _has_stringbuilder; }
  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
  int               max_vector_size() const     { return _max_vector_size; }
  void          set_max_vector_size(int s)      { _max_vector_size = s; }
  void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool              trap_can_recompile() const  { return _trap_can_recompile; }
  void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
  uint              decompile_count() const     { return _decompile_count; }
  void          set_decompile_count(uint c)     { _decompile_count = c; }
  bool              allow_range_check_smearing() const;
  bool              do_inlining() const         { return _do_inlining; }
  void          set_do_inlining(bool z)         { _do_inlining = z; }
  bool              do_scheduling() const       { return _do_scheduling; }
  void          set_do_scheduling(bool z)       { _do_scheduling = z; }
  bool              do_freq_based_layout() const{ return _do_freq_based_layout; }
  void          set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
  bool              do_count_invocations() const{ return _do_count_invocations; }
  void          set_do_count_invocations(bool z){ _do_count_invocations = z; }
  bool              do_method_data_update() const { return _do_method_data_update; }
  void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
  int               AliasLevel() const          { return _AliasLevel; }
  bool              print_assembly() const      { return _print_assembly; }
  void          set_print_assembly(bool z)      { _print_assembly = z; }
  // check the CompilerOracle for special behaviours for this compile
  bool          method_has_option(const char * option) {
    return method() != NULL && method()->has_option(option);
  }
#ifndef PRODUCT
  bool          trace_opto_output() const       { return _trace_opto_output; }
  bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
#endif

  // JSR 292
  bool              has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void          set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  void begin_method() {
#ifndef PRODUCT
    if (_printer) _printer->begin_method(this);
#endif
  }
  void print_method(const char * name, int level = 1) {
#ifndef PRODUCT
    if (_printer) _printer->print_method(this, name, level);
#endif
  }
  void end_method() {
#ifndef PRODUCT
    if (_printer) _printer->end_method();
#endif
  }

  int           macro_count()                   { return _macro_nodes->length(); }
  int           predicate_count()               { return _predicate_opaqs->length(); }
  Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
  Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx); }
  ConnectionGraph* congraph()                   { return _congraph; }
  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph; }
  void add_macro_node(Node * n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node * n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)) {
      _predicate_opaqs->remove(n);
    }
  }
  void add_predicate_opaq(Node * n) {
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }
  // remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
  bool is_predicate_opaq(Node * n) {
    return _predicate_opaqs->contains(n);
  }

  // Compilation environment.
  Arena*            comp_arena()                { return &_comp_arena; }
  ciEnv*            env() const                 { return _env; }
  CompileLog*       log() const                 { return _log; }
  bool              failing() const             { return _env->failing() || _failure_reason != NULL; }
  const char*       failure_reason()            { return _failure_reason; }
  bool              failure_reason_is(const char* r) { return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0); }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason, bool all_tiers = false) {
    // All bailouts cover "all_tiers" when TieredCompilation is off.
    if (!TieredCompilation) all_tiers = true;
    env()->record_method_not_compilable(reason, all_tiers);
    // Record failure reason.
    record_failure(reason);
  }
  void record_method_not_compilable_all_tiers(const char* reason) {
    record_method_not_compilable(reason, true);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (live_nodes() + margin > (uint)MaxNodeLimit) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }

  // Node management
  uint              unique() const              { return _unique; }
  uint         next_unique()                    { return _unique++; }
  void          set_unique(uint i)              { _unique = i; }
  static int        debug_idx()                 { return debug_only(_debug_idx)+0; }
  static void   set_debug_idx(int i)            { debug_only(_debug_idx = i); }
  Arena*            node_arena()                { return &_node_arena; }
  Arena*            old_arena()                 { return &_old_arena; }
  RootNode*         root() const                { return _root; }
  void          set_root(RootNode* r)           { _root = r; }
  StartNode*        start() const;              // (Derived from root.)
  void         init_start(StartNode* s);
  Node*             immutable_memory();

  Node*             recent_alloc_ctl() const    { return _recent_alloc_ctl; }
  Node*             recent_alloc_obj() const    { return _recent_alloc_obj; }
  void          set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }
  void          record_dead_node(uint idx) {
    if (_dead_node_list.test_set(idx)) return;
    _dead_node_count++;
  }
  uint              dead_node_count()           { return _dead_node_count; }
  void              reset_dead_node_list() {
    _dead_node_list.Reset();
    _dead_node_count = 0;
  }
  uint              live_nodes() {
    int val = _unique - _dead_node_count;
    assert(val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique));
    return (uint) val;
  }
#ifdef ASSERT
  uint         count_live_nodes_by_graph_walk();
  void         print_missing_nodes();
#endif

  // Constant table
  ConstantTable&    constant_table()            { return _constant_table; }

  MachConstantBaseNode* mach_constant_base_node();
  bool              has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }

  // Handy undefined Node
  Node*             top() const                 { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node*             cached_top_node()           { return _top; }
  void          set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const        { return _default_node_notes; }
  void    set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes*       node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool   set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
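  // (Illustrative sketch, assuming a freshly cloned node:
  //    Node* d = s->clone();
  //    copy_node_notes_to(d, s);  // carry the source's debug notes to the clone
  //  )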
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena*            type_arena()                { return _type_arena; }
  Dict*             type_dict()                 { return _type_dict; }
  void*             type_hwm()                  { return _type_hwm; }
  size_t            type_last_size()            { return _type_last_size; }
  int               num_alias_types()           { return _num_alias_types; }

  void          init_type_arena()               { _type_arena = &_Compile_types; }
  void          set_type_arena(Arena* a)        { _type_arena = a; }
  void          set_type_dict(Dict* d)          { _type_dict = d; }
  void          set_type_hwm(void* p)           { _type_hwm = p; }
  void          set_type_last_size(size_t sz)   { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }

  AliasType*        alias_type(int idx)         { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
  bool         have_alias_type(const TypePtr* adr_type);
  AliasType*        alias_type(ciField* field);

  int               get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
  const TypePtr*    get_adr_type(uint aidx)     { return alias_type(aidx)->adr_type(); }
  int               get_general_index(uint aidx){ return alias_type(aidx)->general_index(); }

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);

  // Report if there were too many traps at a current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);

  // Parsing, optimization
  PhaseGVN*         initial_gvn()               { return _initial_gvn; }
  Unique_Node_List* for_igvn()                  { return _for_igvn; }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
  void          set_initial_gvn(PhaseGVN *gvn)  { _initial_gvn = gvn; }
  void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
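  // (Illustrative sketch with hypothetical nodes:
  //    gvn_replace_by(old_node, new_node);
  //  rewires every use of old_node to point at new_node, rehashing and
  //  queueing the affected users for the next IGVN round as noted above.)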
  void gvn_replace_by(Node* n, Node* nn);


  void              identify_useful_nodes(Unique_Node_List &useful);
  void              update_dead_node_list(Unique_Node_List &useful);
  void              remove_useless_nodes(Unique_Node_List &useful);

  WarmCallInfo*     warm_calls() const          { return _warm_calls; }
  void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                       { return _cfg; }
  bool              select_24_bit_instr() const { return _select_24_bit_instr; }
  bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()                   { return _matcher; }
  PhaseRegAlloc*    regalloc()                  { return _regalloc; }
  int               frame_slots() const         { return _frame_slots; }
  int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
  RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()            { return _indexSet_arena; }
  void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
  uint              node_bundling_limit()       { return _node_bundling_limit; }
  Bundle*           node_bundling_base()        { return _node_bundling_base; }
  void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
  void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
  bool          starts_bundle(const Node *n) const;
  bool          need_stack_bang(int frame_size_in_bytes) const;
  bool          need_register_stack_bang() const;

  void          set_matcher(Matcher* m)         { _matcher = m; }
  //void        set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)    { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode   = mode;
  }

  void  set_java_calls(int z)  { _java_calls  = z; }
  void set_inner_loops(int z)  { _inner_loops = z; }

  // Instruction bits passed off to the VM
  int               code_size()                 { return _method_size; }
  CodeBuffer*       code_buffer()               { return &_code_buffer; }
  int               first_block_size()          { return _first_block_size; }
  void          set_frame_complete(int off)     { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
  ExceptionHandlerTable*  handler_table()       { return &_handler_table; }
  ImplicitExceptionTable* inc_table()           { return &_inc_table; }
  OopMapSet*        oop_map_set()               { return _oop_map_set; }
  DebugInformationRecorder* debug_info()        { return env()->debug_info(); }
  Dependencies*     dependencies()              { return env()->dependencies(); }
  static int        CompiledZap_count()         { return _CompiledZap_count; }
  BufferBlob*       scratch_buffer_blob()       { return _scratch_buffer_blob; }
  void         init_scratch_buffer_blob(int const_size);
  void        clear_scratch_buffer_blob();
  void          set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo*        scratch_locs_memory()       { return _scratch_locs_memory; }
  void          set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; }

  // emit to scratch blob, report resulting size
  uint              scratch_emit_size(const Node* n);
  void       set_in_scratch_emit_size(bool x)   { _in_scratch_emit_size = x; }
  bool           in_scratch_emit_size() const   { return _in_scratch_emit_size; }

  enum ScratchBufferBlob {
    MAX_inst_size       = 1024,
    MAX_locs_size       = 128, // number of relocInfo elements
    MAX_const_size      = 128,
    MAX_stubs_size      = 128
  };

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc);

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Driver for converting compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node *n);
  bool    valid_bundle_info(const Node *n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();

  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray( int idx, MachSafePointNode* sfpt,
                     Node *local, GrowableArray<ScopeValue*> *array,
                     GrowableArray<ScopeValue*> *objs );

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
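  // (Both helpers are used when recording scalar-replaced objects in debug
  //  info, e.g. from FillLocArray above; the id ties an ObjectValue to the
  //  node it describes.)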
  static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
                                     ObjectValue* sv);

  // Process an OopMap Element while emitting nodes
  void Process_OopMap_Node(MachNode *mach, int code_offset);

  // Initialize code buffer
  CodeBuffer* init_buffer(uint* blk_starts);

  // Write out basic block data to code buffer
  void fill_buffer(CodeBuffer* cb, uint* blk_starts);

  // Determine which variable sized branches can be shortened
  void shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size);

  // Compute the size of first NumberOfLoopInstrToAlign instructions
  // at the head of a loop.
  void compute_loop_first_inst_sizes();

  // Compute the information for the exception tables
  void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

#ifdef ENABLE_ZAP_DEAD_LOCALS
  static bool is_node_getting_a_safepoint(Node*);
  void Insert_zap_nodes();
  Node* call_zap_node(MachSafePointNode* n, int block_no);
#endif

 private:
  // Phase control:
  void Init(int aliaslevel);                     // Prepare for a single compilation
  int  Inline_Warm();                            // Find more inlining work.
  void Finish_Warm();                            // Give up on further inlines.
  void Optimize();                               // Given a graph, optimize it
  void Code_Gen();                               // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  void           register_library_intrinsics();                     // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);   // constructor
  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);      // query fn
  void           register_intrinsic(CallGenerator* cg);             // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
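  // (final_graph_reshaping_walk drives the graph traversal and applies
  //  final_graph_reshaping_impl to each visited node; see compile.cpp.)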
  void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
  void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
  void eliminate_redundant_card_marks(Node* n);

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked = 1,      // succeeded at least once
    _intrinsic_failed = 2,      // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,     // was seen in the virtual form (rare)
    _intrinsic_both = 16        // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return true if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Dump formatted assembly
  void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
  void dump_pc(int *pcs, int pc_limit, Node *n);

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();
};

#endif // SHARE_VM_OPTO_COMPILE_HPP