/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_COMPILE_HPP
#define SHARE_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/compileBroker.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"

class AddPNode;
class Block;
class Bundle;
class CallNode;
class C2Compiler;
class CallGenerator;
class CloneMap;
class ConnectionGraph;
class InlineTree;
class Int_Array;
class LoadBarrierNode;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class NodeCloneInfo;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeData;
class TypeInt;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class ValueTypeBaseNode;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;
class Node_Stack;
struct Final_Reshape_Counts;

enum LoopOptsMode {
  LoopOptsDefault,
  LoopOptsNone,
  LoopOptsShenandoahExpand,
  LoopOptsShenandoahPostExpand,
  LoopOptsSkipSplitIf,
  LoopOptsVerify,
  LoopOptsLastRound
};

typedef unsigned int node_idx_t;
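
// NodeCloneInfo packs a node index (low 32 bits) and a clone generation
// (high 32 bits) into a single 64-bit word. Illustrative round-trip
// (an added sketch, not from the original source):
//   NodeCloneInfo ci(/*idx*/ 42, /*gen*/ 3);
//   assert(ci.idx() == 42 && ci.gen() == 3, "packing round-trips");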
class NodeCloneInfo {
 private:
  uint64_t _idx_clone_orig;
 public:

  void set_idx(node_idx_t idx) {
    _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)) | idx;
  }
  node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }

  void set_gen(int generation) {
    uint64_t g = (uint64_t)generation << 32;
    _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
  }
  int gen() const { return (int)(_idx_clone_orig >> 32); }

  void set(uint64_t x) { _idx_clone_orig = x; }
  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
  uint64_t get() const { return _idx_clone_orig; }

  NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }

  void dump() const;
};

class CloneMap {
  friend class Compile;
 private:
  bool  _debug;
  Dict* _dict;
  int   _clone_idx;  // current cloning iteration/generation in loop unroll
 public:
  void* _2p(node_idx_t key) const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  node_idx_t _2_node_idx_t(const void* k) const { return (node_idx_t)(intptr_t)k; }
  Dict* dict() const { return _dict; }
  void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
  void remove(node_idx_t key) { _dict->Delete(_2p(key)); }
  uint64_t value(node_idx_t key) const { return (uint64_t)_dict->operator[](_2p(key)); }
  node_idx_t idx(node_idx_t key) const { return NodeCloneInfo(value(key)).idx(); }
  int gen(node_idx_t key) const { return NodeCloneInfo(value(key)).gen(); }
  int gen(const void* k) const { return gen(_2_node_idx_t(k)); }
  int max_gen() const;
  void clone(Node* old, Node* nnn, int gen);
  void verify_insert_and_clone(Node* old, Node* nnn, int gen);
  void dump(node_idx_t key) const;

  int  clone_idx() const { return _clone_idx; }
  void set_clone_idx(int x) { _clone_idx = x; }
  bool is_debug() const { return _debug; }
  void set_debug(bool debug) { _debug = debug; }
  static const char* debug_option_name;

  bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
};

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };

  // Variant of TraceTime(NULL, &_t_accumulator, CITime);
  // Integrated with logging.  If logging is turned on, and CITimeVerbose is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on CITime.)
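  // Illustrative use (a sketch; the actual call sites live in compile.cpp,
  // and the timer name/accumulator here are examples):
  //   { TracePhase tp("optimizer", &timers[_t_optimizer]);
  //     ... run the phase; the destructor stops the timer and, when
  //     logging, closes the bracket with a time stamp and node count ...
  //   }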
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
    const char* _phase_name;
    bool        _dolog;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator);
    ~TracePhase();
  };

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    const Type*     _element;       // relevant array element type, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int            index()         const { return _index; }
    const TypePtr* adr_type()      const { return _adr_type; }
    ciField*       field()         const { return _field; }
    const Type*    element()       const { return _element; }
    bool           is_rewritable() const { return _is_rewritable; }
    bool           is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int            general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == NULL, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1 << logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };
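  // Illustrative relationship between address types and alias indexes
  // (a sketch, not from the original source): the raw memory slice always
  // maps to the hard-wired index, so
  //   get_alias_index(TypeRawPtr::BOTTOM) == AliasIdxRaw
  // while distinct field types get fresh indexes via find_alias_type(),
  // with _alias_cache short-circuiting repeated lookups.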
  // Constant entry of the constant table.
  class Constant {
   private:
    BasicType _type;
    union {
      jvalue    _value;
      Metadata* _metadata;
    } _v;
    int       _offset;        // offset of this constant (in bytes) relative to the constant table base.
    float     _freq;
    bool      _can_be_reused; // true (default) if the value can be shared with other users.

   public:
    Constant() : _type(T_ILLEGAL), _offset(-1), _freq(0.0f), _can_be_reused(true) { _v._value.l = 0; }
    Constant(BasicType type, jvalue value, float freq = 0.0f, bool can_be_reused = true) :
      _type(type),
      _offset(-1),
      _freq(freq),
      _can_be_reused(can_be_reused)
    {
      assert(type != T_METADATA, "wrong constructor");
      _v._value = value;
    }
    Constant(Metadata* metadata, bool can_be_reused = true) :
      _type(T_METADATA),
      _offset(-1),
      _freq(0.0f),
      _can_be_reused(can_be_reused)
    {
      _v._metadata = metadata;
    }

    bool operator==(const Constant& other);

    BasicType type()      const { return _type; }

    jint    get_jint()    const { return _v._value.i; }
    jlong   get_jlong()   const { return _v._value.j; }
    jfloat  get_jfloat()  const { return _v._value.f; }
    jdouble get_jdouble() const { return _v._value.d; }
    jobject get_jobject() const { return _v._value.l; }

    Metadata* get_metadata() const { return _v._metadata; }

    int  offset() const            { return _offset; }
    void set_offset(int offset)    { _offset = offset; }

    float freq() const             { return _freq; }
    void  inc_freq(float freq)     { _freq += freq; }

    bool can_be_reused() const     { return _can_be_reused; }
  };

  // Constant table.
  class ConstantTable {
   private:
    GrowableArray<Constant> _constants;         // Constants of this table.
    int                     _size;              // Size in bytes the emitted constant table takes (including padding).
    int                     _table_base_offset; // Offset of the table base that gets added to the constant offsets.
    int                     _nof_jump_tables;   // Number of jump-tables in this constant table.

    static int qsort_comparator(Constant* a, Constant* b);

    // We use negative frequencies to keep the order of the
    // jump-tables in which they were added.  Otherwise we get into
    // trouble with relocation.
    float next_jump_table_freq() { return -1.0f * (++_nof_jump_tables); }

   public:
    ConstantTable() :
      _size(-1),
      _table_base_offset(-1),  // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
      _nof_jump_tables(0)
    {}

    int size() const { assert(_size != -1, "not calculated yet"); return _size; }

    int calculate_table_base_offset() const;  // AD specific
    void set_table_base_offset(int x) { assert(_table_base_offset == -1 || x == _table_base_offset, "can't change"); _table_base_offset = x; }
    int  table_base_offset() const    { assert(_table_base_offset != -1, "not set yet"); return _table_base_offset; }

    void emit(CodeBuffer& cb);
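    // Illustrative flow (a sketch, not from the original source): a
    // constant is registered during matching and its placement queried
    // once calculate_offsets_and_size() has assigned offsets:
    //   Constant con = constant_table().add(n, (jlong)0x1234);
    //   ... later ...
    //   int disp = constant_table().table_base_offset() + constant_table().find_offset(con);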
    // Returns the offset of the last entry (the top) of the constant table.
    int top_offset() const { assert(_constants.top().offset() != -1, "not bound yet"); return _constants.top().offset(); }

    void calculate_offsets_and_size();
    int  find_offset(Constant& con) const;

    void     add(Constant& con);
    Constant add(MachConstantNode* n, BasicType type, jvalue value);
    Constant add(Metadata* metadata);
    Constant add(MachConstantNode* n, MachOper* oper);
    Constant add(MachConstantNode* n, jint i) {
      jvalue value; value.i = i;
      return add(n, T_INT, value);
    }
    Constant add(MachConstantNode* n, jlong j) {
      jvalue value; value.j = j;
      return add(n, T_LONG, value);
    }
    Constant add(MachConstantNode* n, jfloat f) {
      jvalue value; value.f = f;
      return add(n, T_FLOAT, value);
    }
    Constant add(MachConstantNode* n, jdouble d) {
      jvalue value; value.d = d;
      return add(n, T_DOUBLE, value);
    }

    // Jump-table
    Constant add_jump_table(MachConstantNode* n);
    void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
  };

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers; // save/restore arg regs for trampolines
  const bool            _subsume_loads;           // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;      // Do escape analysis.
  const bool            _eliminate_boxing;        // Do boxing elimination.
  ciMethod*             _method;                  // The method being compiled.
  int                   _entry_bci;               // entry bci for osr methods.
  const TypeFunc*       _tf;                      // My kind of signature
  InlineTree*           _ilt;                     // Ditto (temporary).
  address               _stub_function;           // VM entry for stub being compiled, or NULL
  const char*           _stub_name;               // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;        // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _max_inline_size;         // Max inline size for this compilation
  int                   _freq_inline_size;        // Max hot method inline size for this compilation
  int                   _fixed_slots;             // count of frame slots not allocated by the register
                                                  // allocator i.e. locks, original deopt pc, etc.
  uintx                 _max_node_limit;          // Max unique node count during a single compilation.
  // For deopt
  int                   _orig_pc_slot;
  int                   _orig_pc_slot_offset_in_bytes;

  // For value type calling convention
  int                   _sp_inc_slot;
  int                   _sp_inc_slot_offset_in_bytes;

  int                   _major_progress;          // Count of something big happening
  bool                  _inlining_progress;       // progress doing incremental inlining?
  bool                  _inlining_incrementally;  // Are we doing incremental inlining (post parse)
  bool                  _do_cleanup;              // Cleanup is needed before proceeding with incremental inlining
  bool                  _has_loops;               // True if the method _may_ have some loops
  bool                  _has_split_ifs;           // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;       // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;       // True if StringBuffers or StringBuilders are allocated
  bool                  _has_boxed_value;         // True if a boxed object is allocated
  bool                  _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
  uint                  _max_vector_size;         // Maximum size of generated vectors
  bool                  _clear_upper_avx;         // Clear upper bits of ymm registers using vzeroupper
  uint                  _trap_hist[trapHistLength]; // Cumulative traps
  bool                  _trap_can_recompile;      // Have we emitted a recompiling trap?
  uint                  _decompile_count;         // Cumulative decompilation counts.
  bool                  _do_inlining;             // True if we intend to do inlining
  bool                  _do_scheduling;           // True if we intend to do scheduling
  bool                  _do_freq_based_layout;    // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;    // True if we generate code to count invocations
  bool                  _do_method_data_update;   // True if we generate code to update MethodData*s
  bool                  _do_vector_loop;          // True if allowed to execute loop in parallel iterations
  bool                  _use_cmove;               // True if CMove should be used without profitability analysis
  bool                  _age_code;                // True if we need to profile code age (decrement the aging counter)
  int                   _AliasLevel;              // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;          // True if we should dump assembly code for this compilation
  bool                  _print_inlining;          // True if we should print inlining for this compilation
  bool                  _print_intrinsics;        // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool                  _has_irreducible_loop;    // Found irreducible loops
  // JSR 292
  bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  RTMState              _rtm_state;               // State of Restricted Transactional Memory usage
  int                   _loop_opts_cnt;           // loop opts round
  bool                  _has_flattened_accesses;  // Any known flattened array accesses?
  bool                  _flattened_accesses_share_alias; // Initially all flattened arrays share a single slice

  // Compilation environment.
  Arena                 _comp_arena;              // Arena with lifetime equivalent to Compile
  void*                 _barrier_set_state;       // Potential GC barrier state for Compile
  ciEnv*                _env;                     // CI interface
  DirectiveSet*         _directive;               // Compiler directive
  CompileLog*           _log;                     // from CompilerThread
  const char*           _failure_reason;          // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;     // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;             // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;         // List of Opaque1 nodes for the loop predicates.
  GrowableArray<Node*>* _expensive_nodes;         // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*>* _range_check_casts;       // List of CastII nodes with a range check dependency
  GrowableArray<Node*>* _opaque4_nodes;           // List of Opaque4 nodes that have a default value
  Unique_Node_List*     _value_type_nodes;        // List of ValueType nodes
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
#endif

  // Node management
  uint                  _unique;                  // Counter for unique Node indices
  VectorSet             _dead_node_list;          // Set of dead nodes
  uint                  _dead_node_count;         // Number of dead nodes; VectorSet::Size() is O(N).
                                                  // So use this to keep count and make the call O(1).
  DEBUG_ONLY( Unique_Node_List* _modified_nodes; )  // List of nodes whose inputs were modified

  debug_only(static int _debug_idx;)              // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;              // Arena for new-space Nodes
  Arena                 _old_arena;               // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                    // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                     // Unique top node.  (Reset by various phases.)
  Node*                 _immutable_memory;        // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Constant table
  ConstantTable         _constant_table;          // The constant table for this compile.
  MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton.

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1 << _log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;      // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;           // Arena for all types
  Arena*                _type_arena;              // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;               // Intern table
  CloneMap              _clone_map;               // used for recording history of cloned nodes
  void*                 _type_hwm;                // Last allocation (see Type::operator new/delete)
  size_t                _type_last_size;          // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;               // Cache for
  const TypeFunc*       _last_tf;                 //  TypeFunc::make
  AliasType**           _alias_types;             // List of alias types seen so far.
  int                   _num_alias_types;         // Logical length of _alias_types
  int                   _max_alias_types;         // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;             // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;                // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;              // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
                                                      // main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations

  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  int                           _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint                          _number_of_mh_late_inlines; // number of method handle late inlining still pending

  // Inlining may not happen in parse order which would make
  // PrintInlining output confusing. Keep track of PrintInlining
  // pieces in order.
  class PrintInliningBuffer : public ResourceObj {
   private:
    CallGenerator* _cg;
    stringStream*  _ss;

   public:
    PrintInliningBuffer()
      : _cg(NULL) { _ss = new stringStream(); }

    stringStream*  ss() const { return _ss; }
    CallGenerator* cg() const { return _cg; }
    void set_cg(CallGenerator* cg) { _cg = cg; }
  };

  stringStream* _print_inlining_stream;
  GrowableArray<PrintInliningBuffer>* _print_inlining_list;
  int   _print_inlining_idx;
  char* _print_inlining_output;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN &igvn);

  void* _replay_inline_data;  // Pointer to data loaded from file

  void print_inlining_init();
  void print_inlining_reinit();
  void print_inlining_commit();
  void print_inlining_push();
  PrintInliningBuffer& print_inlining_current();

  void log_late_inline_failure(CallGenerator* cg, const char* msg);

 public:

  void* barrier_set_state() const { return _barrier_set_state; }

  outputStream* print_inlining_stream() const {
    assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
    return _print_inlining_stream;
  }

  void print_inlining_update(CallGenerator* cg);
  void print_inlining_update_delayed(CallGenerator* cg);
  void print_inlining_move_to(CallGenerator* cg);
  void print_inlining_assert_ready();
  void print_inlining_reset();

  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
    stringStream ss;
    CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
    print_inlining_stream()->print("%s", ss.as_string());
  }

#ifndef PRODUCT
  IdealGraphPrinter* printer() { return _printer; }
#endif

  void log_late_inline(CallGenerator* cg);
  void log_inline_id(CallGenerator* cg);
  void log_inline_failure(const char* msg);

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                     // Results of CFG finding
  bool                  _select_24_bit_instr;     // We selected an instruction with a 24-bit result
  bool                  _in_24_bit_fp_mode;       // We are emitting instructions with 24-bit results
  int                   _java_calls;              // Number of java calls in the method
  int                   _inner_loops;             // Number of inner loops in the method
  Matcher*              _matcher;                 // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;                // Results of register allocation.
  int                   _frame_slots;             // Size of total frame in stack slots
  CodeOffsets           _code_offsets;            // Offsets into the code for various interesting entries
  RegMask               _FIRST_STACK_mask;        // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;          // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
  int                   _interpreter_frame_size;

  uint                  _node_bundling_limit;
  Bundle*               _node_bundling_base;      // Information for instruction bundling

  // Instruction bits passed off to the VM
  int                   _method_size;             // Size of nmethod code segment in bytes
  CodeBuffer            _code_buffer;             // Where the code is assembled
  int                   _first_block_size;        // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;           // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;              // Table of implicit null checks in native code
  OopMapSet*            _oop_map_set;             // Table of oop maps (one for each safepoint location)
  static int            _CompiledZap_count;       // counter compared against CompileZap[First/Last]
  BufferBlob*           _scratch_buffer_blob;     // For temporary code buffers.
  relocInfo*            _scratch_locs_memory;     // For temporary code buffers.
  int                   _scratch_const_size;      // For temporary code buffers.
  bool                  _in_scratch_emit_size;    // true when in scratch_emit_size.

  void reshape_address(AddPNode* n);

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int           compile_id() const { return _compile_id; }
  DirectiveSet* directive() const  { return _directive; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const       { return _subsume_loads; }
  /** Do escape analysis. */
  bool do_escape_analysis() const  { return _do_escape_analysis; }
  /** Do boxing elimination. */
  bool eliminate_boxing() const    { return _eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
  bool save_argument_registers() const { return _save_argument_registers; }

  // Other fixed compilation parameters.
  ciMethod*       method() const    { return _method; }
  int             entry_bci() const { return _entry_bci; }
  bool            is_osr_compilation() const    { return _entry_bci != InvocationEntryBci; }
  bool            is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc* tf() const        { assert(_tf != NULL, ""); return _tf; }
  void       init_tf(const TypeFunc* tf) { assert(_tf == NULL, ""); _tf = tf; }
  InlineTree*     ilt() const       { return _ilt; }
  address         stub_function() const    { return _stub_function; }
  const char*     stub_name() const        { return _stub_name; }
  address         stub_entry_point() const { return _stub_entry_point; }
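  // Illustrative use of the thread-local accessor above (a sketch, not
  // from the original source): code deep inside a transform can reach
  // the active compilation without plumbing a Compile* through, e.g.
  //   if (Compile::current()->failing())  return NULL;  // bail out early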
  // Control of this compilation.
  int       fixed_slots() const            { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void  set_fixed_slots(int n)             { _fixed_slots = n; }
  int       major_progress() const         { return _major_progress; }
  void  set_inlining_progress(bool z)      { _inlining_progress = z; }
  int       inlining_progress() const      { return _inlining_progress; }
  void  set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int       inlining_incrementally() const { return _inlining_incrementally; }
  void  set_do_cleanup(bool z)             { _do_cleanup = z; }
  int       do_cleanup() const             { return _do_cleanup; }
  void  set_major_progress()               { _major_progress++; }
  void  clear_major_progress()             { _major_progress = 0; }
  int       max_inline_size() const        { return _max_inline_size; }
  void  set_freq_inline_size(int n)        { _freq_inline_size = n; }
  int       freq_inline_size() const       { return _freq_inline_size; }
  void  set_max_inline_size(int n)         { _max_inline_size = n; }
  bool      has_loops() const              { return _has_loops; }
  void  set_has_loops(bool z)              { _has_loops = z; }
  bool      has_split_ifs() const          { return _has_split_ifs; }
  void  set_has_split_ifs(bool z)          { _has_split_ifs = z; }
  bool      has_unsafe_access() const      { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)      { _has_unsafe_access = z; }
  bool      has_stringbuilder() const      { return _has_stringbuilder; }
  void  set_has_stringbuilder(bool z)      { _has_stringbuilder = z; }
  bool      has_boxed_value() const        { return _has_boxed_value; }
  void  set_has_boxed_value(bool z)        { _has_boxed_value = z; }
  bool      has_reserved_stack_access() const { return _has_reserved_stack_access; }
  void  set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
  uint      max_vector_size() const        { return _max_vector_size; }
  void  set_max_vector_size(uint s)        { _max_vector_size = s; }
  bool      clear_upper_avx() const        { return _clear_upper_avx; }
  void  set_clear_upper_avx(bool s)        { _clear_upper_avx = s; }
  void  set_trap_count(uint r, uint c)     { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint      trap_count(uint r) const       { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool      trap_can_recompile() const     { return _trap_can_recompile; }
  void  set_trap_can_recompile(bool z)     { _trap_can_recompile = z; }
  uint      decompile_count() const        { return _decompile_count; }
  void  set_decompile_count(uint c)        { _decompile_count = c; }
  bool      allow_range_check_smearing() const;
  bool      do_inlining() const            { return _do_inlining; }
  void  set_do_inlining(bool z)            { _do_inlining = z; }
  bool      do_scheduling() const          { return _do_scheduling; }
  void  set_do_scheduling(bool z)          { _do_scheduling = z; }
  bool      do_freq_based_layout() const   { return _do_freq_based_layout; }
  void  set_do_freq_based_layout(bool z)   { _do_freq_based_layout = z; }
  bool      do_count_invocations() const   { return _do_count_invocations; }
  void  set_do_count_invocations(bool z)   { _do_count_invocations = z; }
  bool      do_method_data_update() const  { return _do_method_data_update; }
  void  set_do_method_data_update(bool z)  { _do_method_data_update = z; }
  bool      do_vector_loop() const         { return _do_vector_loop; }
  void  set_do_vector_loop(bool z)         { _do_vector_loop = z; }
  bool      use_cmove() const              { return _use_cmove; }
  void  set_use_cmove(bool z)              { _use_cmove = z; }
  bool      age_code() const               { return _age_code; }
  void  set_age_code(bool z)               { _age_code = z; }
  int       AliasLevel() const             { return _AliasLevel; }
  bool      print_assembly() const         { return _print_assembly; }
  void  set_print_assembly(bool z)         { _print_assembly = z; }
  bool      print_inlining() const         { return _print_inlining; }
  void  set_print_inlining(bool z)         { _print_inlining = z; }
  bool      print_intrinsics() const       { return _print_intrinsics; }
  void  set_print_intrinsics(bool z)       { _print_intrinsics = z; }
  RTMState  rtm_state() const              { return _rtm_state; }
  void  set_rtm_state(RTMState s)          { _rtm_state = s; }
  bool      use_rtm() const                { return (_rtm_state & NoRTM) == 0; }
  bool      profile_rtm() const            { return _rtm_state == ProfileRTM; }
  uint      max_node_limit() const         { return (uint)_max_node_limit; }
  void  set_max_node_limit(uint n)         { _max_node_limit = n; }
  void  set_flattened_accesses()           { _has_flattened_accesses = true; }
  bool      flattened_accesses_share_alias() const { return _flattened_accesses_share_alias; }
  void  set_flattened_accesses_share_alias(bool z) { _flattened_accesses_share_alias = z; }

  // Support for scalarized value type calling convention
  bool has_scalarized_args() const { return _method != NULL && _method->has_scalarized_args(); }
  bool needs_stack_repair() const  { return _method != NULL && _method->get_Method()->c2_needs_stack_repair(); }
  int  sp_inc_offset() const       { return _sp_inc_slot_offset_in_bytes; }

  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(const char* option) {
    return method() != NULL && method()->has_option(option);
  }
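  // e.g. (illustrative; the option name here is a hypothetical
  // CompileCommand option, not taken from the original source):
  //   if (method_has_option("Vectorize")) { ... per-method tweak ... }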
#ifndef PRODUCT
  bool trace_opto_output() const       { return _trace_opto_output; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;  // Required for dumping ir nodes.
#endif
  bool     has_irreducible_loop() const  { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z)  { _has_irreducible_loop = z; }

  // JSR 292
  bool     has_method_handle_invokes() const  { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z)  { _has_method_handle_invokes = z; }

  Ticks _latest_stage_start_counter;

  void begin_method() {
#ifndef PRODUCT
    if (_printer && _printer->should_print(1)) {
      _printer->begin_method();
    }
#endif
    C->_latest_stage_start_counter.stamp();
  }

  void print_method(CompilerPhaseType cpt, int level = 1) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      event.set_starttime(C->_latest_stage_start_counter);
      event.set_phase((u1) cpt);
      event.set_compileId(C->_compile_id);
      event.set_phaseLevel(level);
      event.commit();
    }

#ifndef PRODUCT
    if (_printer && _printer->should_print(level)) {
      _printer->print_method(CompilerPhaseTypeHelper::to_string(cpt), level);
    }
#endif
    C->_latest_stage_start_counter.stamp();
  }

  void end_method(int level = 1) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      event.set_starttime(C->_latest_stage_start_counter);
      event.set_phase((u1) PHASE_END);
      event.set_compileId(C->_compile_id);
      event.set_phaseLevel(level);
      event.commit();
    }
#ifndef PRODUCT
    if (_printer && _printer->should_print(level)) {
      _printer->end_method();
    }
#endif
  }

  int   macro_count()     const { return _macro_nodes->length(); }
  int   predicate_count() const { return _predicate_opaqs->length(); }
  int   expensive_count() const { return _expensive_nodes->length(); }
  Node* macro_node(int idx) const { return _macro_nodes->at(idx); }
  Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx); }
  Node* expensive_node(int idx) const { return _expensive_nodes->at(idx); }
  ConnectionGraph* congraph() { return _congraph; }
  void set_congraph(ConnectionGraph* congraph) { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node* n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n)) {
      _macro_nodes->remove(n);
    }
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)) {
      _predicate_opaqs->remove(n);
    }
  }
  void add_expensive_node(Node* n);
  void remove_expensive_node(Node* n) {
    if (_expensive_nodes->contains(n)) {
      _expensive_nodes->remove(n);
    }
  }
  void add_predicate_opaq(Node* n) {
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }

  // Range check dependent CastII nodes that can be removed after loop optimizations
  void add_range_check_cast(Node* n);
  void remove_range_check_cast(Node* n) {
    if (_range_check_casts->contains(n)) {
      _range_check_casts->remove(n);
    }
  }
  Node* range_check_cast_node(int idx) const { return _range_check_casts->at(idx); }
  int   range_check_cast_count()       const { return _range_check_casts->length(); }
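  // Illustrative registration (a sketch, not from the original source): a
  // CastII pinned under a range check is recorded so its dependency can be
  // dropped once loop optimizations are done:
  //   C->add_range_check_cast(cast);
  //   ...
  //   C->remove_range_check_casts(igvn);  // after loop opts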
  // Remove all range check dependent CastIINodes.
  void remove_range_check_casts(PhaseIterGVN &igvn);

  void add_opaque4_node(Node* n);
  void remove_opaque4_node(Node* n) {
    if (_opaque4_nodes->contains(n)) {
      _opaque4_nodes->remove(n);
    }
  }
  Node* opaque4_node(int idx) const { return _opaque4_nodes->at(idx); }
  int   opaque4_count()       const { return _opaque4_nodes->length(); }
  void  remove_opaque4_nodes(PhaseIterGVN &igvn);

  // Keep track of value type nodes for later processing
  void add_value_type(Node* n);
  void remove_value_type(Node* n);
  void process_value_types(PhaseIterGVN &igvn);
  bool can_add_value_type() const { return _value_type_nodes != NULL; }

  void adjust_flattened_array_access_aliases(PhaseIterGVN& igvn);

  // Remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
  bool is_predicate_opaq(Node* n) {
    return _predicate_opaqs->contains(n);
  }

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena*      comp_arena()           { return &_comp_arena; }
  ciEnv*      env() const            { return _env; }
  CompileLog* log() const            { return _log; }
  bool        failing() const        { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }

  bool failure_reason_is(const char* r) const {
    return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0);
  }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason) {
    // Bailouts cover "all_tiers" when TieredCompilation is off.
    env()->record_method_not_compilable(reason, !TieredCompilation);
    // Record failure reason.
    record_failure(reason);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
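  // Node accounting invariant (illustrative): live_nodes() == unique() -
  // dead_node_count(). A bailout check before a transform that may create
  // many nodes then looks like (a sketch; the margin is an example):
  //   if (C->check_node_count(2000, "out of nodes")) return;  // bailed out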
  // Node management
  uint       unique() const        { return _unique; }
  uint       next_unique()         { return _unique++; }
  void       set_unique(uint i)    { _unique = i; }
  static int debug_idx()           { return debug_only(_debug_idx)+0; }
  static void set_debug_idx(int i) { debug_only(_debug_idx = i); }
  Arena*     node_arena()          { return &_node_arena; }
  Arena*     old_arena()           { return &_old_arena; }
  RootNode*  root() const          { return _root; }
  void       set_root(RootNode* r) { _root = r; }
  StartNode* start() const;        // (Derived from root.)
  void       init_start(StartNode* s);
  Node*      immutable_memory();

  Node* recent_alloc_ctl() const   { return _recent_alloc_ctl; }
  Node* recent_alloc_obj() const   { return _recent_alloc_obj; }
  void  set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }
  void record_dead_node(uint idx) {
    if (_dead_node_list.test_set(idx)) return;
    _dead_node_count++;
  }
  bool is_dead_node(uint idx)      { return _dead_node_list.test(idx) != 0; }
  uint dead_node_count()           { return _dead_node_count; }
  void reset_dead_node_list() {
    _dead_node_list.Reset();
    _dead_node_count = 0;
  }
  uint live_nodes() const {
    int val = _unique - _dead_node_count;
    assert(val >= 0, "number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique);
    return (uint) val;
  }
#ifdef ASSERT
  uint count_live_nodes_by_graph_walk();
  void print_missing_nodes();
#endif

  // Record modified nodes to check that they are put on IGVN worklist
  void record_modified_node(Node* n) NOT_DEBUG_RETURN;
  void remove_modified_node(Node* n) NOT_DEBUG_RETURN;
  DEBUG_ONLY( Unique_Node_List* modified_nodes() const { return _modified_nodes; } )

  // Constant table
  ConstantTable& constant_table() { return _constant_table; }

  MachConstantBaseNode* mach_constant_base_node();
  bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool needs_clone_jvms();

  // Handy undefined Node
  Node* top() const { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node* cached_top_node() { return _top; }
  void  set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const { return _default_node_notes; }
  void set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes* node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena* type_arena()     { return _type_arena; }
  Dict*  type_dict()      { return _type_dict; }
  void*  type_hwm()       { return _type_hwm; }
  size_t type_last_size() { return _type_last_size; }
  int    num_alias_types() { return _num_alias_types; }

  void init_type_arena()             { _type_arena = &_Compile_types; }
  void set_type_arena(Arena* a)      { _type_arena = a; }
  void set_type_dict(Dict* d)        { _type_dict = d; }
  void set_type_hwm(void* p)         { _type_hwm = p; }
  void set_type_last_size(size_t sz) { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }

  AliasType* alias_type(int idx)     { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL, bool uncached = false) { return find_alias_type(adr_type, false, field, uncached); }
  bool have_alias_type(const TypePtr* adr_type);
  AliasType* alias_type(ciField* field);

  int            get_alias_index(const TypePtr* at, bool uncached = false) { return alias_type(at, NULL, uncached)->index(); }
  const TypePtr* get_adr_type(uint aidx)      { return alias_type(aidx)->adr_type(); }
  int            get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }

  // Building nodes
  void      rethrow_exceptions(JVMState* jvms);
  void      return_values(JVMState* jvms);
  JVMState* build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
                                bool allow_intrinsics = true, bool delayed_forbidden = false);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms);
  }
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at call-site
  ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool& call_does_dispatch, int& vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at a current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Report if there were too many traps or recompiles at a method and bci.
  bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
    return too_many_traps(method, bci, reason) ||
           too_many_recompiles(method, bci, reason);
  }
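  // e.g. (illustrative): a parser deciding whether an uncommon trap is
  // still profitable at a call site:
  //   if (!C->too_many_traps(method, bci, Deoptimization::Reason_unreached)) {
  //     ... emit the trap; otherwise generate the slow but safe path ...
  //   }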
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int  allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();

  // Parsing, optimization
  PhaseGVN*         initial_gvn() { return _initial_gvn; }
  Unique_Node_List* for_igvn()    { return _for_igvn; }
  inline void       record_for_igvn(Node* n);  // Body is after class Unique_Node_List.
  void set_initial_gvn(PhaseGVN* gvn)            { _initial_gvn = gvn; }
  void set_for_igvn(Unique_Node_List* for_igvn)  { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void identify_useful_nodes(Unique_Node_List &useful);
  void update_dead_node_list(Unique_Node_List &useful);
  void remove_useless_nodes (Unique_Node_List &useful);

  WarmCallInfo* warm_calls() const { return _warm_calls; }
  void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void add_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }

  void prepend_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(0, cg);
  }

  void add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);

  void process_print_inlining();
  void dump_print_inlining();
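  // Inserting at _late_inlines_pos (rather than appending) keeps nested
  // candidates next to their caller, emulating depth-first inline order.
  // Illustrative effect (a sketch): with queue [A, B] and the position
  // just after A, add_late_inline(A1) yields [A, A1, B] rather than
  // [A, B, A1].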
  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give some room for incremental inlining algorithm to "breathe"
      // and avoid thrashing when live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
    }
  }

  void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const     { return _number_of_mh_late_inlines > 0; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*      cfg()                        { return _cfg; }
  bool           select_24_bit_instr() const  { return _select_24_bit_instr; }
  bool           in_24_bit_fp_mode() const    { return _in_24_bit_fp_mode; }
  bool           has_java_calls() const       { return _java_calls > 0; }
  int            java_calls() const           { return _java_calls; }
  int            inner_loops() const          { return _inner_loops; }
  Matcher*       matcher()                    { return _matcher; }
  PhaseRegAlloc* regalloc()                   { return _regalloc; }
  int            frame_slots() const          { return _frame_slots; }
  int            frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
  int            frame_size_in_bytes() const  { return _frame_slots << LogBytesPerInt; }
  RegMask&       FIRST_STACK_mask()           { return _FIRST_STACK_mask; }
  Arena*         indexSet_arena()             { return _indexSet_arena; }
  void*          indexSet_free_block_list()   { return _indexSet_free_block_list; }
  uint           node_bundling_limit()        { return _node_bundling_limit; }
  Bundle*        node_bundling_base()         { return _node_bundling_base; }
  void       set_node_bundling_limit(uint n)  { _node_bundling_limit = n; }
  void       set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
  bool           starts_bundle(const Node* n) const;
  bool           need_stack_bang(int frame_size_in_bytes) const;
  bool           need_register_stack_bang() const;

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }
  int bang_size_in_bytes() const;

  void   set_matcher(Matcher* m)              { _matcher = m; }
  //void set_regalloc(PhaseRegAlloc* ra)      { _regalloc = ra; }
  void   set_indexSet_arena(Arena* a)         { _indexSet_arena = a; }
  void   set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode   = mode;
  }

  void set_java_calls(int z)  { _java_calls  = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  // Instruction bits passed off to the VM
  int         code_size()        { return _method_size; }
  CodeBuffer* code_buffer()      { return &_code_buffer; }
  int         first_block_size() { return _first_block_size; }
  void        set_frame_complete(int off) { if (!in_scratch_emit_size()) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); } }
  ExceptionHandlerTable*  handler_table() { return &_handler_table; }
  ImplicitExceptionTable* inc_table()     { return &_inc_table; }
  OopMapSet*  oop_map_set()      { return _oop_map_set; }
  DebugInformationRecorder* debug_info()  { return env()->debug_info(); }
  Dependencies* dependencies()   { return env()->dependencies(); }
  static int    CompiledZap_count() { return _CompiledZap_count; }
  BufferBlob*   scratch_buffer_blob() { return _scratch_buffer_blob; }
  void          init_scratch_buffer_blob(int const_size);
  void          clear_scratch_buffer_blob();
  void          set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo*    scratch_locs_memory() { return _scratch_locs_memory; }
  void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }

  // emit to scratch blob, report resulting size
  uint          scratch_emit_size(const Node* n);
  void          set_in_scratch_emit_size(bool x) { _in_scratch_emit_size = x; }
  bool          in_scratch_emit_size() const     { return _in_scratch_emit_size; }

  enum ScratchBufferBlob {
#if defined(PPC64)
    MAX_inst_size       = 2048,
#else
    MAX_inst_size       = 1024,
#endif
    MAX_locs_size       = 128, // number of relocInfo elements
    MAX_const_size      = 128,
    MAX_stubs_size      = 128
  };

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis,
          bool eliminate_boxing, DirectiveSet* directive);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc, DirectiveSet* directive);

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Driver for converting compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node *n);
  bool    valid_bundle_info(const Node *n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();
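  // Illustrative sizing query (a sketch, not from the original source):
  // branch shortening measures an instruction by emitting it into the
  // scratch blob first; none of the emitted bits are kept:
  //   uint sz = C->scratch_emit_size(mach);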
  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray( int idx, MachSafePointNode* sfpt,
                     Node *local, GrowableArray<ScopeValue*> *array,
                     GrowableArray<ScopeValue*> *objs );

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
  static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
                                     ObjectValue* sv );

  // Process an OopMap Element while emitting nodes
  void Process_OopMap_Node(MachNode *mach, int code_offset);

  // Initialize code buffer
  CodeBuffer* init_buffer(uint* blk_starts);

  // Write out basic block data to code buffer
  void fill_buffer(CodeBuffer* cb, uint* blk_starts);

  // Determine which variable sized branches can be shortened
  void shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size);

  // Compute the size of first NumberOfLoopInstrToAlign instructions
  // at the head of a loop.
  void compute_loop_first_inst_sizes();

  // Compute the information for the exception tables
  void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

 private:
  // Phase control:
  void Init(int aliaslevel);     // Prepare for a single compilation
  int  Inline_Warm();            // Find more inlining work.
  void Finish_Warm();            // Give up on further inlines.
  void Optimize();               // Given a graph, optimize it
  void Code_Gen();               // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr* flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field, bool uncached = false);

  void verify_top(Node*) const PRODUCT_RETURN;
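  // Illustrative phase sequence for a normal method compilation (a
  // simplified sketch; the actual driver lives in compile.cpp):
  //   Init(aliaslevel);  // arenas, alias types, initial GVN
  //   ... parse the method into the ideal graph ...
  //   Optimize();        // IGVN, loop opts, macro expansion, etc.
  //   Code_Gen();        // match, schedule, allocate registers, emit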
  // Intrinsic setup.
  void           register_library_intrinsics();                    // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);  // constructor
  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);     // query fn
  void           register_intrinsic(CallGenerator* cg);            // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
  void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop);
  void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
  void eliminate_redundant_card_marks(Node* n);

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked   = 1,    // succeeded at least once
    _intrinsic_failed   = 2,    // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual  = 8,    // was seen in the virtual form (rare)
    _intrinsic_both     = 16    // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return boolean if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;
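  // e.g. (illustrative): after a risky transform, assert full graph
  // connectivity in debug builds:
  //   C->verify_graph_edges(/*no_dead_code*/ true);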
  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Dump formatted assembly
#if defined(SUPPORT_OPTO_ASSEMBLY)
  void dump_asm_on(outputStream* ost, int* pcs, uint pc_limit);
  void dump_asm(int* pcs = NULL, uint pc_limit = 0) { dump_asm_on(tty, pcs, pc_limit); }
#else
  void dump_asm_on(outputStream* ost, int* pcs, uint pc_limit) { return; }
  void dump_asm(int* pcs = NULL, uint pc_limit = 0) { return; }
#endif
  void dump_pc(int *pcs, int pc_limit, Node *n);

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);

  Node* optimize_acmp(PhaseGVN* phase, Node* a, Node* b);

  // Auxiliary method for randomized fuzzing/stressing
  static bool randomized_select(int count);

  // supporting clone_map
  CloneMap& clone_map();
  void      set_clone_map(Dict* d);

  bool is_compiling_clinit_for(ciKlass* k);
};

#endif // SHARE_OPTO_COMPILE_HPP