/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_COMPILE_HPP
#define SHARE_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"

class AddPNode;
class Block;
class Bundle;
class CallGenerator;
class CloneMap;
class ConnectionGraph;
class IdealGraphPrinter;
class InlineTree;
class Int_Array;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class NodeCloneInfo;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class PhaseOutput;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeData;
class TypeInt;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class TypeVect;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;
class Node_Stack;
struct Final_Reshape_Counts;

enum LoopOptsMode {
  LoopOptsDefault,
  LoopOptsNone,
  LoopOptsMaxUnroll,
  LoopOptsShenandoahExpand,
  LoopOptsShenandoahPostExpand,
  LoopOptsSkipSplitIf,
  LoopOptsVerify
};

typedef unsigned int node_idx_t;
class NodeCloneInfo {
 private:
  uint64_t _idx_clone_orig;
 public:

  void set_idx(node_idx_t idx) {
    _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)) | idx;
  }
  node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }

  void set_gen(int generation) {
    uint64_t g = (uint64_t)generation << 32;
    _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
  }
  int gen() const { return (int)(_idx_clone_orig >> 32); }

  void set(uint64_t x) { _idx_clone_orig = x; }
  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
  uint64_t get() const { return _idx_clone_orig; }

  NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }

  void dump() const;
};
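
// Illustrative sketch (not part of the build): NodeCloneInfo round-trips a
// node index and its clone generation through the single 64-bit word above:
//
//   NodeCloneInfo info(/* idx */ 42, /* gen */ 3);
//   assert(info.idx() == 42, "low 32 bits hold the node index");
//   assert(info.gen() == 3,  "high 32 bits hold the clone generation");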

class CloneMap {
  friend class Compile;
 private:
  bool       _debug;
  Dict*      _dict;
  int        _clone_idx;   // current cloning iteration/generation in loop unroll
 public:
  void*      _2p(node_idx_t key)  const          { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  node_idx_t _2_node_idx_t(const void* k) const  { return (node_idx_t)(intptr_t)k; }
  Dict*      dict()               const          { return _dict; }
  void insert(node_idx_t key, uint64_t val)      { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
  void remove(node_idx_t key)                    { _dict->Delete(_2p(key)); }
  uint64_t value(node_idx_t key)  const          { return (uint64_t)_dict->operator[](_2p(key)); }
  node_idx_t idx(node_idx_t key)  const          { return NodeCloneInfo(value(key)).idx(); }
  int gen(node_idx_t key)         const          { return NodeCloneInfo(value(key)).gen(); }
  int gen(const void* k)          const          { return gen(_2_node_idx_t(k)); }
  int max_gen()                   const;
  void clone(Node* old, Node* nnn, int gen);
  void verify_insert_and_clone(Node* old, Node* nnn, int gen);
  void dump(node_idx_t key)       const;

  int  clone_idx() const                         { return _clone_idx; }
  void set_clone_idx(int x)                      { _clone_idx = x; }
  bool is_debug()                 const          { return _debug; }
  void set_debug(bool debug)                     { _debug = debug; }
  static const char* debug_option_name;

  bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
};

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };
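
  // Illustrative sketch (not part of the build): every memory slice is mapped
  // to an alias index, and the fixed pseudo-indexes above act as sentinels.
  // Given a Compile* C and some const TypePtr* adr_type:
  //
  //   int idx = C->get_alias_index(adr_type);
  //   if (idx == Compile::AliasIdxBot) { /* may alias any memory slice */ }
  //   if (idx == Compile::AliasIdxRaw) { /* raw (untyped) memory       */ }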

  // Variant of TraceTime(NULL, &_t_accumulator, CITime);
  // Integrated with logging.  If logging is turned on, and CITimeVerbose is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on CITime.)
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
    const char* _phase_name;
    bool        _dolog;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator);
    ~TracePhase();
  };

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    const Type*     _element;       // relevant array element type, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    const Type*     element()       const { return _element; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field,"");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == NULL, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1<<logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };
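
  // Worked note (illustrative): the alias cache above is a direct-mapped
  // table of AliasCacheSize == (1 << 6) == 64 entries; a lookup hashes the
  // address type's pointer to pick an entry before falling back to a full
  // walk of the _alias_types table.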

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers; // save/restore arg regs for trampolines
  const bool            _subsume_loads;         // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;    // Do escape analysis.
  const bool            _eliminate_boxing;      // Do boxing elimination.
  ciMethod*             _method;                // The method being compiled.
  int                   _entry_bci;             // entry bci for osr methods.
  const TypeFunc*       _tf;                    // My kind of signature
  InlineTree*           _ilt;                   // Ditto (temporary).
  address               _stub_function;         // VM entry for stub being compiled, or NULL
  const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;      // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _max_inline_size;       // Max inline size for this compilation
  int                   _freq_inline_size;      // Max hot method inline size for this compilation
  int                   _fixed_slots;           // count of frame slots not allocated by the register
                                                // allocator i.e. locks, original deopt pc, etc.
  uintx                 _max_node_limit;        // Max unique node count during a single compilation.

  int                   _major_progress;        // Count of something big happening
  bool                  _inlining_progress;     // progress doing incremental inlining?
  bool                  _inlining_incrementally;// Are we doing incremental inlining (post parse)
  bool                  _do_cleanup;            // Cleanup is needed before proceeding with incremental inlining
  bool                  _has_loops;             // True if the method _may_ have some loops
  bool                  _has_split_ifs;         // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
  bool                  _has_boxed_value;       // True if a boxed object is allocated
  bool                  _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
  uint                  _max_vector_size;       // Maximum size of generated vectors
  bool                  _clear_upper_avx;       // Clear upper bits of ymm registers using vzeroupper
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
  uint                  _decompile_count;       // Cumulative decompilation counts.
  bool                  _do_inlining;           // True if we intend to do inlining
  bool                  _do_scheduling;         // True if we intend to do scheduling
  bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;  // True if we generate code to count invocations
  bool                  _do_method_data_update; // True if we generate code to update MethodData*s
  bool                  _do_vector_loop;        // True if allowed to execute loop in parallel iterations
  bool                  _use_cmove;             // True if CMove should be used without profitability analysis
  bool                  _age_code;              // True if we need to profile code age (decrement the aging counter)
  int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;        // True if we should dump assembly code for this compilation
  bool                  _print_inlining;        // True if we should print inlining for this compilation
  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _print_ideal;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool                  _has_irreducible_loop;  // Found irreducible loops
  // JSR 292
  bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
  int                   _loop_opts_cnt;         // loop opts round
  bool                  _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry

  // Compilation environment.
  Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
  void*                 _barrier_set_state;     // Potential GC barrier state for Compile
  ciEnv*                _env;                   // CI interface
  DirectiveSet*         _directive;             // Compiler directive
  CompileLog*           _log;                   // from CompilerThread
  const char*           _failure_reason;        // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
  GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*>* _range_check_casts;     // List of CastII nodes with a range check dependency
  GrowableArray<Node*>* _opaque4_nodes;         // List of Opaque4 nodes that have a default value
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
  static IdealGraphPrinter* _debug_file_printer;
  static IdealGraphPrinter* _debug_network_printer;
#endif


  // Node management
  uint                  _unique;                // Counter for unique Node indices
  VectorSet             _dead_node_list;        // Set of dead nodes
  uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
                                                // So use this to keep count and make the call O(1).
  DEBUG_ONLY( Unique_Node_List* _modified_nodes; )  // List of nodes whose inputs were modified

  debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;            // Arena for new-space Nodes
  Arena                 _old_arena;             // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                  // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                   // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;      // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Constant table
  MachConstantBaseNode* _mach_constant_base_node;  // Constant table base node singleton.


  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;    // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;         // Arena for all types
  Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;             // Intern table
  CloneMap              _clone_map;             // used for recording history of cloned nodes
  size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;             // Cache for
  const TypeFunc*       _last_tf;               //  TypeFunc::make
  AliasType**           _alias_types;           // List of alias types seen so far.
  int                   _num_alias_types;       // Logical length of _alias_types
  int                   _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize];  // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
                                                      // main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations

  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  int                           _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint                          _number_of_mh_late_inlines; // number of method handle late inlining still pending


  // Inlining may not happen in parse order, which would make
  // PrintInlining output confusing. Keep track of PrintInlining
  // pieces in order.
  class PrintInliningBuffer : public ResourceObj {
   private:
    CallGenerator* _cg;
    stringStream* _ss;

   public:
    PrintInliningBuffer()
      : _cg(NULL) { _ss = new stringStream(); }

    void freeStream() { _ss->~stringStream(); _ss = NULL; }

    stringStream* ss() const { return _ss; }
    CallGenerator* cg() const { return _cg; }
    void set_cg(CallGenerator* cg) { _cg = cg; }
  };

  stringStream* _print_inlining_stream;
  GrowableArray<PrintInliningBuffer>* _print_inlining_list;
  int _print_inlining_idx;
  char* _print_inlining_output;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  // Used for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN &igvn);

  void* _replay_inline_data; // Pointer to data loaded from file

  void print_inlining_stream_free();
  void print_inlining_init();
  void print_inlining_reinit();
  void print_inlining_commit();
  void print_inlining_push();
  PrintInliningBuffer& print_inlining_current();

  void log_late_inline_failure(CallGenerator* cg, const char* msg);

 public:

  void* barrier_set_state() const { return _barrier_set_state; }

  outputStream* print_inlining_stream() const {
    assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
    return _print_inlining_stream;
  }

  void print_inlining_update(CallGenerator* cg);
  void print_inlining_update_delayed(CallGenerator* cg);
  void print_inlining_move_to(CallGenerator* cg);
  void print_inlining_assert_ready();
  void print_inlining_reset();

  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
    stringStream ss;
    CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
    print_inlining_stream()->print("%s", ss.as_string());
  }

#ifndef PRODUCT
  IdealGraphPrinter* printer() { return _printer; }
#endif

  void log_late_inline(CallGenerator* cg);
  void log_inline_id(CallGenerator* cg);
  void log_inline_failure(const char* msg);

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                   // Results of CFG finding
  int                   _java_calls;            // Number of java calls in the method
  int                   _inner_loops;           // Number of inner loops in the method
  Matcher*              _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;              // Results of register allocation.
  RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
  int                   _interpreter_frame_size;

  PhaseOutput*          _output;

  void reshape_address(AddPNode* n);

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  int interpreter_frame_size() const            { return _interpreter_frame_size; }

  PhaseOutput* output() const                   { return _output; }
  void set_output(PhaseOutput* o)               { _output = o; }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int               compile_id() const          { return _compile_id; }
  DirectiveSet*     directive() const           { return _directive; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool              subsume_loads() const       { return _subsume_loads; }
  /** Do escape analysis. */
  bool              do_escape_analysis() const  { return _do_escape_analysis; }
  /** Do boxing elimination. */
  bool              eliminate_boxing() const    { return _eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool              aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
  bool              save_argument_registers() const { return _save_argument_registers; }


  // Other fixed compilation parameters.
  ciMethod*         method() const              { return _method; }
  int               entry_bci() const           { return _entry_bci; }
  bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
  bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc*   tf() const                  { assert(_tf!=NULL, ""); return _tf; }
  void         init_tf(const TypeFunc* tf)      { assert(_tf==NULL, ""); _tf = tf; }
  InlineTree*       ilt() const                 { return _ilt; }
  address           stub_function() const       { return _stub_function; }
  const char*       stub_name() const           { return _stub_name; }
  address           stub_entry_point() const    { return _stub_entry_point; }
  void          set_stub_entry_point(address z) { _stub_entry_point = z; }
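
  // Illustrative sketch (not part of the build): any code running on the
  // compiler thread can reach the active compilation through current(), e.g.
  //
  //   Compile* C = Compile::current();
  //   if (C->failing()) return;   // bail out early once a failure is recorded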

  // Control of this compilation.
  int               fixed_slots() const         { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void          set_fixed_slots(int n)          { _fixed_slots = n; }
  int               major_progress() const      { return _major_progress; }
  void          set_inlining_progress(bool z)   { _inlining_progress = z; }
  int               inlining_progress() const   { return _inlining_progress; }
  void          set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int               inlining_incrementally() const { return _inlining_incrementally; }
  void          set_do_cleanup(bool z)          { _do_cleanup = z; }
  int               do_cleanup() const          { return _do_cleanup; }
  void          set_major_progress()            { _major_progress++; }
  void      restore_major_progress(int progress){ _major_progress += progress; }
  void        clear_major_progress()            { _major_progress = 0; }
  int               max_inline_size() const     { return _max_inline_size; }
  void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
  int               freq_inline_size() const    { return _freq_inline_size; }
  void          set_max_inline_size(int n)      { _max_inline_size = n; }
  bool              has_loops() const           { return _has_loops; }
  void          set_has_loops(bool z)           { _has_loops = z; }
  bool              has_split_ifs() const       { return _has_split_ifs; }
  void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
  bool              has_unsafe_access() const   { return _has_unsafe_access; }
  void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
  bool              has_stringbuilder() const   { return _has_stringbuilder; }
  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
  bool              has_boxed_value() const     { return _has_boxed_value; }
  void          set_has_boxed_value(bool z)     { _has_boxed_value = z; }
  bool              has_reserved_stack_access() const { return _has_reserved_stack_access; }
  void          set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
  uint              max_vector_size() const     { return _max_vector_size; }
  void          set_max_vector_size(uint s)     { _max_vector_size = s; }
  bool              clear_upper_avx() const     { return _clear_upper_avx; }
  void          set_clear_upper_avx(bool s)     { _clear_upper_avx = s; }
  void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool              trap_can_recompile() const  { return _trap_can_recompile; }
  void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
  uint              decompile_count() const     { return _decompile_count; }
  void          set_decompile_count(uint c)     { _decompile_count = c; }
  bool              allow_range_check_smearing() const;
  bool              do_inlining() const         { return _do_inlining; }
  void          set_do_inlining(bool z)         { _do_inlining = z; }
  bool              do_scheduling() const       { return _do_scheduling; }
  void          set_do_scheduling(bool z)       { _do_scheduling = z; }
  bool              do_freq_based_layout() const { return _do_freq_based_layout; }
  void          set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool              do_count_invocations() const { return _do_count_invocations; }
  void          set_do_count_invocations(bool z) { _do_count_invocations = z; }
  bool              do_method_data_update() const { return _do_method_data_update; }
  void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
  bool              do_vector_loop() const      { return _do_vector_loop; }
  void          set_do_vector_loop(bool z)      { _do_vector_loop = z; }
  bool              use_cmove() const           { return _use_cmove; }
  void          set_use_cmove(bool z)           { _use_cmove = z; }
  bool              age_code() const            { return _age_code; }
  void          set_age_code(bool z)            { _age_code = z; }
  int               AliasLevel() const          { return _AliasLevel; }
  bool              print_assembly() const      { return _print_assembly; }
  void          set_print_assembly(bool z)      { _print_assembly = z; }
  bool              print_inlining() const      { return _print_inlining; }
  void          set_print_inlining(bool z)      { _print_inlining = z; }
  bool              print_intrinsics() const    { return _print_intrinsics; }
  void          set_print_intrinsics(bool z)    { _print_intrinsics = z; }
  RTMState          rtm_state() const           { return _rtm_state; }
  void          set_rtm_state(RTMState s)       { _rtm_state = s; }
  bool              use_rtm() const             { return (_rtm_state & NoRTM) == 0; }
  bool          profile_rtm() const             { return _rtm_state == ProfileRTM; }
  uint              max_node_limit() const      { return (uint)_max_node_limit; }
  void          set_max_node_limit(uint n)      { _max_node_limit = n; }
  bool              clinit_barrier_on_entry()   { return _clinit_barrier_on_entry; }
  void          set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }

  // check the CompilerOracle for special behaviours for this compile
  bool          method_has_option(const char * option) {
    return method() != NULL && method()->has_option(option);
  }

#ifndef PRODUCT
  bool          trace_opto_output() const       { return _trace_opto_output; }
  bool              print_ideal() const         { return _print_ideal; }
  bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;  // Required for dumping ir nodes.
#endif
  bool              has_irreducible_loop() const { return _has_irreducible_loop; }
  void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool              has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void          set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  Ticks _latest_stage_start_counter;

  void begin_method() {
#ifndef PRODUCT
    if (_printer && _printer->should_print(1)) {
      _printer->begin_method();
    }
#endif
    C->_latest_stage_start_counter.stamp();
  }

  bool should_print(int level = 1) {
#ifndef PRODUCT
    return (_printer && _printer->should_print(level));
#else
    return false;
#endif
  }

  void print_method(CompilerPhaseType cpt, int level = 1, int idx = 0) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      CompilerEvent::PhaseEvent::post(event, C->_latest_stage_start_counter, cpt, C->_compile_id, level);
    }

#ifndef PRODUCT
    if (should_print(level)) {
      char output[1024];
      if (idx != 0) {
        jio_snprintf(output, sizeof(output), "%s:%d", CompilerPhaseTypeHelper::to_string(cpt), idx);
      } else {
        jio_snprintf(output, sizeof(output), "%s", CompilerPhaseTypeHelper::to_string(cpt));
      }
      _printer->print_method(output, level);
    }
#endif
    C->_latest_stage_start_counter.stamp();
  }

#ifndef PRODUCT
  void igv_print_method_to_file(const char* phase_name = "Debug", bool append = false);
  void igv_print_method_to_network(const char* phase_name = "Debug");
  static IdealGraphPrinter* debug_file_printer() { return _debug_file_printer; }
  static IdealGraphPrinter* debug_network_printer() { return _debug_network_printer; }
#endif

  void end_method(int level = 1) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      CompilerEvent::PhaseEvent::post(event, C->_latest_stage_start_counter, PHASE_END, C->_compile_id, level);
    }

#ifndef PRODUCT
    if (_printer && _printer->should_print(level)) {
      _printer->end_method();
    }
#endif
  }
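
  // Illustrative sketch (not part of the build): a transformation typically
  // brackets its work with the hooks above so JFR and the IdealGraphPrinter
  // see consistent phase boundaries; phase ids come from opto/phasetype.hpp,
  // e.g.
  //
  //   C->print_method(PHASE_AFTER_PARSING);   // snapshot the graph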

  int           macro_count()             const { return _macro_nodes->length(); }
  int           predicate_count()         const { return _predicate_opaqs->length(); }
  int           expensive_count()         const { return _expensive_nodes->length(); }
  Node*         macro_node(int idx)       const { return _macro_nodes->at(idx); }
  Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx); }
  Node*         expensive_node(int idx)   const { return _expensive_nodes->at(idx); }
  ConnectionGraph* congraph()                   { return _congraph; }
  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph; }
  void add_macro_node(Node * n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node * n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)) {
      _predicate_opaqs->remove(n);
    }
  }
  void add_expensive_node(Node * n);
  void remove_expensive_node(Node * n) {
    if (_expensive_nodes->contains(n)) {
      _expensive_nodes->remove(n);
    }
  }
  void add_predicate_opaq(Node * n) {
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }

  // Range check dependent CastII nodes that can be removed after loop optimizations
  void add_range_check_cast(Node* n);
  void remove_range_check_cast(Node* n) {
    if (_range_check_casts->contains(n)) {
      _range_check_casts->remove(n);
    }
  }
  Node* range_check_cast_node(int idx) const { return _range_check_casts->at(idx); }
  int   range_check_cast_count()       const { return _range_check_casts->length(); }
  // Remove all range check dependent CastIINodes.
  void  remove_range_check_casts(PhaseIterGVN &igvn);

  void add_opaque4_node(Node* n);
  void remove_opaque4_node(Node* n) {
    if (_opaque4_nodes->contains(n)) {
      _opaque4_nodes->remove(n);
    }
  }
  Node* opaque4_node(int idx) const { return _opaque4_nodes->at(idx); }
  int   opaque4_count()       const { return _opaque4_nodes->length(); }
  void  remove_opaque4_nodes(PhaseIterGVN &igvn);

  void sort_macro_nodes();

  // Remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
  bool is_predicate_opaq(Node * n) {
    return _predicate_opaqs->contains(n);
  }

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena*      comp_arena()           { return &_comp_arena; }
  ciEnv*      env() const            { return _env; }
  CompileLog* log() const            { return _log; }
  bool        failing() const        { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }
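
  // Illustrative sketch (not part of the build): the record_failure/failing
  // pattern lets any phase bail out and lets callers observe it cheaply, e.g.
  //
  //   if (C->check_node_count(1000, "out of nodes")) {
  //     return;   // failure reason is recorded; callers will see failing()
  //   }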

  bool failure_reason_is(const char* r) const {
    return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0);
  }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason) {
    // Bailouts cover "all_tiers" when TieredCompilation is off.
    env()->record_method_not_compilable(reason, !TieredCompilation);
    // Record failure reason.
    record_failure(reason);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }

  // Node management
  uint         unique() const              { return _unique; }
  uint         next_unique()               { return _unique++; }
  void         set_unique(uint i)          { _unique = i; }
  static int   debug_idx()                 { return debug_only(_debug_idx)+0; }
  static void  set_debug_idx(int i)        { debug_only(_debug_idx = i); }
  Arena*       node_arena()                { return &_node_arena; }
  Arena*       old_arena()                 { return &_old_arena; }
  RootNode*    root() const                { return _root; }
  void         set_root(RootNode* r)       { _root = r; }
  StartNode*   start() const;              // (Derived from root.)
  void         init_start(StartNode* s);
  Node*        immutable_memory();

  Node*        recent_alloc_ctl() const    { return _recent_alloc_ctl; }
  Node*        recent_alloc_obj() const    { return _recent_alloc_obj; }
  void         set_recent_alloc(Node* ctl, Node* obj) {
                 _recent_alloc_ctl = ctl;
                 _recent_alloc_obj = obj;
               }
  void         record_dead_node(uint idx)  {
                 if (_dead_node_list.test_set(idx)) return;
                 _dead_node_count++;
               }
  void         reset_dead_node_list()      {
                 _dead_node_list.reset();
                 _dead_node_count = 0;
               }
  uint         live_nodes() const          {
    int val = _unique - _dead_node_count;
    assert (val >= 0, "number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique);
    return (uint) val;
  }
#ifdef ASSERT
  uint         count_live_nodes_by_graph_walk();
  void         print_missing_nodes();
#endif

  // Record modified nodes to check that they are put on IGVN worklist
  void         record_modified_node(Node* n) NOT_DEBUG_RETURN;
  void         remove_modified_node(Node* n) NOT_DEBUG_RETURN;
  DEBUG_ONLY( Unique_Node_List* modified_nodes() const { return _modified_nodes; } )

  MachConstantBaseNode* mach_constant_base_node();
  bool              has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool              needs_clone_jvms();

  // Handy undefined Node
  Node*             top() const                 { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node*             cached_top_node()           { return _top; }
  void          set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const        { return _default_node_notes; }
  void    set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes*       node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool   set_node_notes_at(int idx, Node_Notes* value);
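
  // Worked note (illustrative): node notes live in blocks of
  // _node_notes_block_size == 256 entries, so a lookup conceptually splits
  // the node index into a block number and an offset within that block:
  //
  //   int block = idx >> _log2_node_notes_block_size;   // idx / 256
  //   int slot  = idx &  (_node_notes_block_size - 1);  // idx % 256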

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena* type_arena()                { return _type_arena; }
  Dict*  type_dict()                 { return _type_dict; }
  size_t type_last_size()            { return _type_last_size; }
  int    num_alias_types()           { return _num_alias_types; }

  void          init_type_arena()             { _type_arena = &_Compile_types; }
  void          set_type_arena(Arena* a)      { _type_arena = a; }
  void          set_type_dict(Dict* d)        { _type_dict = d; }
  void          set_type_last_size(size_t sz) { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }

  AliasType*        alias_type(int idx)       { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
  bool         have_alias_type(const TypePtr* adr_type);
  AliasType*        alias_type(ciField* field);

  int               get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
  const TypePtr*    get_adr_type(uint aidx)            { return alias_type(aidx)->adr_type(); }
  int               get_general_index(uint aidx)       { return alias_type(aidx)->general_index(); }

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                   JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
                                   bool allow_intrinsics = true, bool delayed_forbidden = false);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms);
  }
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at call-site
  ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool &call_does_dispatch, int &vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Report if there were too many traps or recompiles at a method and bci.
  bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
    return too_many_traps(method, bci, reason) ||
           too_many_recompiles(method, bci, reason);
  }
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int      allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();

  // Parsing, optimization
  PhaseGVN*         initial_gvn()               { return _initial_gvn; }
  Unique_Node_List* for_igvn()                  { return _for_igvn; }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
  void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
  void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);


  void              identify_useful_nodes(Unique_Node_List &useful);
  void              update_dead_node_list(Unique_Node_List &useful);
  void              remove_useless_nodes (Unique_Node_List &useful);

  WarmCallInfo*     warm_calls() const          { return _warm_calls; }
  void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void              add_late_inline(CallGenerator* cg)        {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }

  void              prepend_late_inline(CallGenerator* cg)    {
    _late_inlines.insert_before(0, cg);
  }

  void              add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void              add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);

  void process_print_inlining();
  void dump_print_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give some room for incremental inlining algorithm to "breathe"
      // and avoid thrashing when live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
    }
  }
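
  // Worked example (illustrative): the 11/10 factor above grants incremental
  // inlining 10% headroom over the cutoff; e.g. with
  // LiveNodeCountInliningCutoff == 40000 the effective limit is
  // 40000 * 11 / 10 == 44000 live nodes.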

  void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const     { return _number_of_mh_late_inlines > 0; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                       { return _cfg; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()                   { return _matcher; }
  PhaseRegAlloc*    regalloc()                  { return _regalloc; }
  RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()            { return _indexSet_arena; }
  void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
  DebugInformationRecorder* debug_info()        { return env()->debug_info(); }

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }

  void          set_matcher(Matcher* m)               { _matcher = m; }
  //void        set_regalloc(PhaseRegAlloc* ra)       { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)          { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  void  set_java_calls(int z)  { _java_calls  = z; }
  void set_inner_loops(int z)  { _inner_loops = z; }

  Dependencies* dependencies() { return env()->dependencies(); }

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis,
          bool eliminate_boxing, DirectiveSet* directive);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc, DirectiveSet* directive);

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }
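
  // Illustrative sketch (not part of the build): a driver constructs the
  // first Compile entry point above directly; InvocationEntryBci selects a
  // normal compilation, any other entry_bci an OSR continuation, e.g.
  //
  //   Compile C(ci_env, target, InvocationEntryBci,
  //             /* subsume_loads      */ true,
  //             /* do_escape_analysis */ true,
  //             /* eliminate_boxing   */ true, directive);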

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

 private:
  // Phase control:
  void Init(int aliaslevel);                     // Prepare for a single compilation
  int  Inline_Warm();                            // Find more inlining work.
  void Finish_Warm();                            // Give up on further inlines.
  void Optimize();                               // Given a graph, optimize it
  void Code_Gen();                               // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  void           register_library_intrinsics();                    // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);  // constructor
  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);     // query fn
  void           register_intrinsic(CallGenerator* cg);            // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
  void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop);
  void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
  void eliminate_redundant_card_marks(Node* n);

  // Logic cone optimization.
  void optimize_logic_cones(PhaseIterGVN &igvn);
  void collect_logic_cone_roots(Unique_Node_List& list);
  void process_logic_cone_root(PhaseIterGVN &igvn, Node* n, VectorSet& visited);
  bool compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs);
  uint compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs);
  uint eval_macro_logic_op(uint func, uint op1, uint op2, uint op3);
  Node* xform_to_MacroLogicV(PhaseIterGVN &igvn, const TypeVect* vt, Unique_Node_List& partitions, Unique_Node_List& inputs);

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked = 1,      // succeeded at least once
    _intrinsic_failed = 2,      // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,     // was seen in the virtual form (rare)
    _intrinsic_both = 16        // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return true if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);

  // Auxiliary method for randomized fuzzing/stressing
  static bool randomized_select(int count);

  // supporting clone_map
  CloneMap& clone_map();
  void      set_clone_map(Dict* d);

  bool needs_clinit_barrier(ciField* ik,         ciMethod* accessing_method);
  bool needs_clinit_barrier(ciMethod* ik,        ciMethod* accessing_method);
  bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);

#ifdef IA32
 private:
  bool _select_24_bit_instr;   // We selected an instruction with a 24-bit result
  bool _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results

  // Remember if this compilation changes hardware mode to 24-bit precision.
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode   = mode;
  }

 public:
  bool select_24_bit_instr() const { return _select_24_bit_instr; }
  bool in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
#endif // IA32
#ifdef ASSERT
  bool _type_verify_symmetry;
#endif
};

#endif // SHARE_OPTO_COMPILE_HPP