/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_COMPILE_HPP
#define SHARE_VM_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/compileBroker.hpp"
#include "libadt/dict.hpp"
#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/vmThread.hpp"
#include "trace/tracing.hpp"
#include "utilities/ticks.hpp"

class Block;
class Bundle;
class C2Compiler;
class CallGenerator;
class ConnectionGraph;
class InlineTree;
class Int_Array;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class ShenandoahBarrierNode;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeData;
class TypeInt;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;
class Node_Stack;
struct Final_Reshape_Counts;

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };

  // Variant of TraceTime(NULL, &_t_accumulator, TimeCompiler);
  // Integrated with logging.  If logging is turned on, and dolog is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on TimeCompiler.)
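  // A minimal usage sketch (illustrative only; the accumulator name
  // _t_optimizer is an assumption, not something this header declares):
  //
  //   {
  //     TracePhase tp("optimizer", &_t_optimizer, true);
  //     Optimize();   // timed; log brackets are emitted if logging is on
  //   }               // destructor stops the timer and closes the bracket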
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
    const char* _phase_name;
    bool        _dolog;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator, bool dolog);
    ~TracePhase();
  };

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    const Type*     _element;       // relevant array element type, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that it is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int            index()         const { return _index; }
    const TypePtr* adr_type()      const { return _adr_type; }
    ciField*       field()         const { return _field; }
    const Type*    element()       const { return _element; }
    bool           is_rewritable() const { return _is_rewritable; }
    bool           is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int            general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == NULL, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1 << logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };
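  // Illustrative sketch (hedged) of how alias categories are typically
  // consumed from phase code holding a Compile* C; adr_type is a
  // hypothetical local, and the accessors used are declared further down
  // in this class:
  //
  //   int alias_idx = C->get_alias_index(adr_type);  // selects a MergeMem slice
  //   if (alias_idx == AliasIdxBot) {
  //     // conservative: this address may alias everything
  //   }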
  // Constant entry of the constant table.
  class Constant {
   private:
    BasicType _type;
    union {
      jvalue    _value;
      Metadata* _metadata;
    } _v;
    int       _offset;        // offset of this constant (in bytes) relative to the constant table base.
    float     _freq;
    bool      _can_be_reused; // true (default) if the value can be shared with other users.

   public:
    Constant() : _type(T_ILLEGAL), _offset(-1), _freq(0.0f), _can_be_reused(true) { _v._value.l = 0; }
    Constant(BasicType type, jvalue value, float freq = 0.0f, bool can_be_reused = true) :
      _type(type),
      _offset(-1),
      _freq(freq),
      _can_be_reused(can_be_reused)
    {
      assert(type != T_METADATA, "wrong constructor");
      _v._value = value;
    }
    Constant(Metadata* metadata, bool can_be_reused = true) :
      _type(T_METADATA),
      _offset(-1),
      _freq(0.0f),
      _can_be_reused(can_be_reused)
    {
      _v._metadata = metadata;
    }

    bool operator==(const Constant& other);

    BasicType type()      const    { return _type; }

    jlong   get_jlong()   const    { return _v._value.j; }
    jfloat  get_jfloat()  const    { return _v._value.f; }
    jdouble get_jdouble() const    { return _v._value.d; }
    jobject get_jobject() const    { return _v._value.l; }

    Metadata* get_metadata() const { return _v._metadata; }

    int offset()  const            { return _offset; }
    void set_offset(int offset)    {        _offset = offset; }

    float freq()  const            { return _freq; }
    void inc_freq(float freq)      {        _freq += freq; }

    bool can_be_reused() const     { return _can_be_reused; }
  };

  // Constant table.
  class ConstantTable {
   private:
    GrowableArray<Constant> _constants;          // Constants of this table.
    int                     _size;               // Size in bytes the emitted constant table takes (including padding).
    int                     _table_base_offset;  // Offset of the table base that gets added to the constant offsets.
    int                     _nof_jump_tables;    // Number of jump-tables in this constant table.

    static int qsort_comparator(Constant* a, Constant* b);

    // We use negative frequencies to keep the order of the
    // jump-tables in which they were added.  Otherwise we get into
    // trouble with relocation.
    float next_jump_table_freq() { return -1.0f * (++_nof_jump_tables); }

   public:
    ConstantTable() :
      _size(-1),
      _table_base_offset(-1),  // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
      _nof_jump_tables(0)
    {}

    int size() const { assert(_size != -1, "not calculated yet"); return _size; }

    int calculate_table_base_offset() const;  // AD specific
    void set_table_base_offset(int x)  { assert(_table_base_offset == -1 || x == _table_base_offset, "can't change"); _table_base_offset = x; }
    int      table_base_offset() const { assert(_table_base_offset != -1, "not set yet");                      return _table_base_offset; }

    void emit(CodeBuffer& cb);

    // Returns the offset of the last entry (the top) of the constant table.
    int  top_offset() const { assert(_constants.top().offset() != -1, "not bound yet"); return _constants.top().offset(); }

    void calculate_offsets_and_size();
    int  find_offset(Constant& con) const;

    void     add(Constant& con);
    Constant add(MachConstantNode* n, BasicType type, jvalue value);
    Constant add(Metadata* metadata);
    Constant add(MachConstantNode* n, MachOper* oper);
    Constant add(MachConstantNode* n, jfloat f) {
      jvalue value; value.f = f;
      return add(n, T_FLOAT, value);
    }
    Constant add(MachConstantNode* n, jdouble d) {
      jvalue value; value.d = d;
      return add(n, T_DOUBLE, value);
    }

    // Jump-table
    Constant add_jump_table(MachConstantNode* n);
    void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
  };
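  // A hedged sketch of typical use during matching/output, assuming n is a
  // MachConstantNode (offsets only become valid once
  // calculate_offsets_and_size() has run):
  //
  //   Constant con = C->constant_table().add(n, 2.0);   // jdouble overload
  //   // ... later, when emitting, a displacement can be computed as:
  //   int disp = C->constant_table().table_base_offset()
  //            + C->constant_table().find_offset(con);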
 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers; // save/restore arg regs for trampolines
  const bool            _subsume_loads;           // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;      // Do escape analysis.
  const bool            _eliminate_boxing;        // Do boxing elimination.
  ciMethod*             _method;                  // The method being compiled.
  int                   _entry_bci;               // entry bci for osr methods.
  const TypeFunc*       _tf;                      // My kind of signature
  InlineTree*           _ilt;                     // Ditto (temporary).
  address               _stub_function;           // VM entry for stub being compiled, or NULL
  const char*           _stub_name;               // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;        // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _num_loop_opts;           // Number of iterations for doing loop optimizations
  int                   _max_inline_size;         // Max inline size for this compilation
  int                   _freq_inline_size;        // Max hot method inline size for this compilation
  int                   _fixed_slots;             // count of frame slots not allocated by the register
                                                  // allocator i.e. locks, original deopt pc, etc.
  uintx                 _max_node_limit;          // Max unique node count during a single compilation.
  // For deopt
  int                   _orig_pc_slot;
  int                   _orig_pc_slot_offset_in_bytes;

  int                   _major_progress;          // Count of something big happening
  bool                  _inlining_progress;       // progress doing incremental inlining?
  bool                  _inlining_incrementally;  // Are we doing incremental inlining (post parse)
  bool                  _has_loops;               // True if the method _may_ have some loops
  bool                  _has_split_ifs;           // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;       // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;       // True if StringBuffers or StringBuilders are allocated
  bool                  _has_boxed_value;         // True if a boxed object is allocated
  int                   _max_vector_size;         // Maximum size of generated vectors
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;      // Have we emitted a recompiling trap?
  uint                  _decompile_count;         // Cumulative decompilation counts.
  bool                  _do_inlining;             // True if we intend to do inlining
  bool                  _do_scheduling;           // True if we intend to do scheduling
  bool                  _do_freq_based_layout;    // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;    // True if we generate code to count invocations
  bool                  _do_method_data_update;   // True if we generate code to update MethodData*s
  int                   _AliasLevel;              // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;          // True if we should dump assembly code for this compilation
  bool                  _print_inlining;          // True if we should print inlining for this compilation
  bool                  _print_intrinsics;        // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool                  _has_irreducible_loop;    // Found irreducible loops
  // JSR 292
  bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  RTMState              _rtm_state;               // State of Restricted Transactional Memory usage

  // Compilation environment.
  Arena                 _comp_arena;              // Arena with lifetime equivalent to Compile
  ciEnv*                _env;                     // CI interface
  CompileLog*           _log;                     // from CompilerThread
  const char*           _failure_reason;          // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;     // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;             // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;         // List of Opaque1 nodes for the loop predicates.
  GrowableArray<Node*>* _expensive_nodes;         // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*>* _range_check_casts;       // List of CastII nodes with a range check dependency
  GrowableArray<ShenandoahBarrierNode*>* _shenandoah_barriers;
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
#endif

  // Node management
  uint                  _unique;                  // Counter for unique Node indices
  VectorSet             _dead_node_list;          // Set of dead nodes
  uint                  _dead_node_count;         // Number of dead nodes; VectorSet::Size() is O(N).
                                                  // So use this to keep count and make the call O(1).
  debug_only(static int _debug_idx;)              // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;              // Arena for new-space Nodes
  Arena                 _old_arena;               // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                    // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                     // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;        // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Constant table
  ConstantTable         _constant_table;          // The constant table for this compile.
  MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton.

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1 << _log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes* _default_node_notes;                // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.
  // Type management
  Arena                 _Compile_types;           // Arena for all types
  Arena*                _type_arena;              // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;               // Intern table
  void*                 _type_hwm;                // Last allocation (see Type::operator new/delete)
  size_t                _type_last_size;          // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;               // Cache for
  const TypeFunc*       _last_tf;                 //  TypeFunc::make
  AliasType**           _alias_types;             // List of alias types seen so far.
  int                   _num_alias_types;         // Logical length of _alias_types
  int                   _max_alias_types;         // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;             // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;                // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;              // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
                                                      // main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations

  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  int                           _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint                          _number_of_mh_late_inlines; // number of method handle late inlining still pending

  // Inlining may not happen in parse order which would make
  // PrintInlining output confusing. Keep track of PrintInlining
  // pieces in order.
  class PrintInliningBuffer : public ResourceObj {
   private:
    CallGenerator* _cg;
    stringStream*  _ss;

   public:
    PrintInliningBuffer()
      : _cg(NULL) { _ss = new stringStream(); }

    stringStream*  ss() const { return _ss; }
    CallGenerator* cg() const { return _cg; }
    void set_cg(CallGenerator* cg) { _cg = cg; }
  };

  GrowableArray<PrintInliningBuffer>* _print_inlining_list;
  int _print_inlining_idx;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN &igvn);

  void* _replay_inline_data; // Pointer to data loaded from file

 public:

  outputStream* print_inlining_stream() const {
    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
  }

  void print_inlining_skip(CallGenerator* cg) {
    if (_print_inlining) {
      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
      _print_inlining_idx++;
      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
    }
  }

  void print_inlining_insert(CallGenerator* cg) {
    if (_print_inlining) {
      for (int i = 0; i < _print_inlining_list->length(); i++) {
        if (_print_inlining_list->adr_at(i)->cg() == cg) {
          _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
          _print_inlining_idx = i+1;
          _print_inlining_list->adr_at(i)->set_cg(NULL);
          return;
        }
      }
      ShouldNotReachHere();
    }
  }

  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
    stringStream ss;
    CompileTask::print_inlining(&ss, method, inline_level, bci, msg);
    print_inlining_stream()->print("%s", ss.as_string());
  }

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);
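  // Typical call-site sketch (hedged): record an inlining decision so the
  // buffered PrintInlining output stays in parse order; callee, jvms and
  // bci are hypothetical locals, and the print_inlining() flag accessor is
  // declared further down:
  //
  //   if (C->print_inlining()) {
  //     C->print_inlining(callee, jvms->depth() - 1, bci, "inline (hot)");
  //   }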
 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                     // Results of CFG finding
  bool                  _select_24_bit_instr;     // We selected an instruction with a 24-bit result
  bool                  _in_24_bit_fp_mode;       // We are emitting instructions with 24-bit results
  int                   _java_calls;              // Number of java calls in the method
  int                   _inner_loops;             // Number of inner loops in the method
  Matcher*              _matcher;                 // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;                // Results of register allocation.
  int                   _frame_slots;             // Size of total frame in stack slots
  CodeOffsets           _code_offsets;            // Offsets into the code for various interesting entries
  RegMask               _FIRST_STACK_mask;        // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;          // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
  int                   _interpreter_frame_size;

  uint                  _node_bundling_limit;
  Bundle*               _node_bundling_base;      // Information for instruction bundling

  // Instruction bits passed off to the VM
  int                   _method_size;             // Size of nmethod code segment in bytes
  CodeBuffer            _code_buffer;             // Where the code is assembled
  int                   _first_block_size;        // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;           // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;              // Table of implicit null checks in native code
  OopMapSet*            _oop_map_set;             // Table of oop maps (one for each safepoint location)
  static int            _CompiledZap_count;       // counter compared against CompileZap[First/Last]
  BufferBlob*           _scratch_buffer_blob;     // For temporary code buffers.
  relocInfo*            _scratch_locs_memory;     // For temporary code buffers.
  int                   _scratch_const_size;      // For temporary code buffers.
  bool                  _in_scratch_emit_size;    // true when in scratch_emit_size.

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int compile_id() const { return _compile_id; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const { return _subsume_loads; }
  /** Do escape analysis. */
  bool do_escape_analysis() const { return _do_escape_analysis; }
  /** Do boxing elimination. */
  bool eliminate_boxing() const { return _eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
  bool save_argument_registers() const { return _save_argument_registers; }

  // Other fixed compilation parameters.
  ciMethod* method() const { return _method; }
  int entry_bci() const { return _entry_bci; }
  bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc* tf() const { assert(_tf != NULL, ""); return _tf; }
  void init_tf(const TypeFunc* tf) { assert(_tf == NULL, ""); _tf = tf; }
  InlineTree* ilt() const { return _ilt; }
  address stub_function() const { return _stub_function; }
  const char* stub_name() const { return _stub_name; }
  address stub_entry_point() const { return _stub_entry_point; }

  // Control of this compilation.
  int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void set_fixed_slots(int n) { _fixed_slots = n; }
  int major_progress() const { return _major_progress; }
  void set_inlining_progress(bool z) { _inlining_progress = z; }
  int inlining_progress() const { return _inlining_progress; }
  void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int inlining_incrementally() const { return _inlining_incrementally; }
  void set_major_progress() { _major_progress++; }
  void clear_major_progress() { _major_progress = 0; }
  int num_loop_opts() const { return _num_loop_opts; }
  void set_num_loop_opts(int n) { _num_loop_opts = n; }
  int max_inline_size() const { return _max_inline_size; }
  void set_freq_inline_size(int n) { _freq_inline_size = n; }
  int freq_inline_size() const { return _freq_inline_size; }
  void set_max_inline_size(int n) { _max_inline_size = n; }
  bool has_loops() const { return _has_loops; }
  void set_has_loops(bool z) { _has_loops = z; }
  bool has_split_ifs() const { return _has_split_ifs; }
  void set_has_split_ifs(bool z) { _has_split_ifs = z; }
  bool has_unsafe_access() const { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
  bool has_stringbuilder() const { return _has_stringbuilder; }
  void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
  bool has_boxed_value() const { return _has_boxed_value; }
  void set_has_boxed_value(bool z) { _has_boxed_value = z; }
  int max_vector_size() const { return _max_vector_size; }
  void set_max_vector_size(int s) { _max_vector_size = s; }
  void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool trap_can_recompile() const { return _trap_can_recompile; }
  void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
  uint decompile_count() const { return _decompile_count; }
  void set_decompile_count(uint c) { _decompile_count = c; }
  bool allow_range_check_smearing() const;
  bool do_inlining() const { return _do_inlining; }
  void set_do_inlining(bool z) { _do_inlining = z; }
  bool do_scheduling() const { return _do_scheduling; }
  void set_do_scheduling(bool z) { _do_scheduling = z; }
  bool do_freq_based_layout() const { return _do_freq_based_layout; }
  void set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool do_count_invocations() const { return _do_count_invocations; }
  void set_do_count_invocations(bool z) { _do_count_invocations = z; }
  bool do_method_data_update() const { return _do_method_data_update; }
  void set_do_method_data_update(bool z) { _do_method_data_update = z; }
  int AliasLevel() const { return _AliasLevel; }
  bool print_assembly() const { return _print_assembly; }
  void set_print_assembly(bool z) { _print_assembly = z; }
  bool print_inlining() const { return _print_inlining; }
  void set_print_inlining(bool z) { _print_inlining = z; }
  bool print_intrinsics() const { return _print_intrinsics; }
  void set_print_intrinsics(bool z) { _print_intrinsics = z; }
  RTMState rtm_state() const { return _rtm_state; }
  void set_rtm_state(RTMState s) { _rtm_state = s; }
  bool use_rtm() const { return (_rtm_state & NoRTM) == 0; }
  bool profile_rtm() const { return _rtm_state == ProfileRTM; }
  uint max_node_limit() const { return (uint)_max_node_limit; }
  void set_max_node_limit(uint n) { _max_node_limit = n; }

  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(const char* option) {
    return method() != NULL && method()->has_option(option);
  }
  template<typename T>
  bool method_has_option_value(const char* option, T& value) {
    return method() != NULL && method()->has_option_value(option, value);
  }
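  // Sketch of CompilerOracle-driven behaviour (hedged; the option names
  // below are assumptions, standing in for whatever the oracle recognizes,
  // e.g. via -XX:CompileCommand=option,...):
  //
  //   if (C->method_has_option("PrintOptoAssembly")) {
  //     set_print_assembly(true);
  //   }
  //   uintx limit;
  //   if (C->method_has_option_value("MaxNodeLimit", limit)) {
  //     set_max_node_limit((uint)limit);
  //   }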
#ifndef PRODUCT
  bool trace_opto_output() const { return _trace_opto_output; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;  // Required for dumping ir nodes.
#endif
  bool has_irreducible_loop() const { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  Ticks _latest_stage_start_counter;

  void begin_method() {
#ifndef PRODUCT
    if (_printer) _printer->begin_method(this);
#endif
    C->_latest_stage_start_counter.stamp();
  }

  void print_method(CompilerPhaseType cpt, int level = 1) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      event.set_starttime(C->_latest_stage_start_counter);
      event.set_phase((u1) cpt);
      event.set_compileID(C->_compile_id);
      event.set_phaseLevel(level);
      event.commit();
    }

#ifndef PRODUCT
    if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level);
#endif
    C->_latest_stage_start_counter.stamp();
  }

  void end_method(int level = 1) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      event.set_starttime(C->_latest_stage_start_counter);
      event.set_phase((u1) PHASE_END);
      event.set_compileID(C->_compile_id);
      event.set_phaseLevel(level);
      event.commit();
    }
#ifndef PRODUCT
    if (_printer) _printer->end_method();
#endif
  }

  int           macro_count()               const { return _macro_nodes->length(); }
  int           predicate_count()           const { return _predicate_opaqs->length(); }
  int           expensive_count()           const { return _expensive_nodes->length(); }
  int           shenandoah_barriers_count() const { return _shenandoah_barriers->length(); }
  Node*         macro_node(int idx)         const { return _macro_nodes->at(idx); }
  Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx); }
  Node*         expensive_node(int idx)     const { return _expensive_nodes->at(idx); }
  ShenandoahBarrierNode* shenandoah_barrier(int idx) const { return _shenandoah_barriers->at(idx); }
  ConnectionGraph* congraph()                     { return _congraph; }
  void set_congraph(ConnectionGraph* congraph)    { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node* n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)) {
      _predicate_opaqs->remove(n);
    }
  }
  void add_expensive_node(Node* n);
  void remove_expensive_node(Node* n) {
    if (_expensive_nodes->contains(n)) {
      _expensive_nodes->remove(n);
    }
  }
  void add_shenandoah_barrier(ShenandoahBarrierNode* n) {
    assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list");
    _shenandoah_barriers->append(n);
  }
  void remove_shenandoah_barrier(ShenandoahBarrierNode* n) {
    if (_shenandoah_barriers->contains(n)) {
      _shenandoah_barriers->remove(n);
    }
  }
  void add_predicate_opaq(Node* n) {
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }
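  // Registration sketch (hedged): nodes that must be expanded before
  // matching, e.g. a hypothetical AllocateNode* alloc created during
  // parsing, are tracked like this and removed again if they die:
  //
  //   C->add_macro_node(alloc);        // queue for macro expansion
  //   ...
  //   C->remove_macro_node(alloc);     // safe to call more than once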
  // Range check dependent CastII nodes that can be removed after loop optimizations
  void add_range_check_cast(Node* n);
  void remove_range_check_cast(Node* n) {
    if (_range_check_casts->contains(n)) {
      _range_check_casts->remove(n);
    }
  }
  Node* range_check_cast_node(int idx) const { return _range_check_casts->at(idx); }
  int   range_check_cast_count()       const { return _range_check_casts->length(); }
  // Remove all range check dependent CastIINodes.
  void  remove_range_check_casts(PhaseIterGVN &igvn);

  // remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
  bool is_predicate_opaq(Node* n) {
    return _predicate_opaqs->contains(n);
  }

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  GrowableArray<ShenandoahBarrierNode*>* shenandoah_barriers() { return _shenandoah_barriers; }

  // Compilation environment.
  Arena*      comp_arena()           { return &_comp_arena; }
  ciEnv*      env() const            { return _env; }
  CompileLog* log() const            { return _log; }
  bool        failing() const        { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason()       { return _failure_reason; }
  bool        failure_reason_is(const char* r) { return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0); }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason, bool all_tiers = false) {
    // All bailouts cover "all_tiers" when TieredCompilation is off.
    if (!TieredCompilation) all_tiers = true;
    env()->record_method_not_compilable(reason, all_tiers);
    // Record failure reason.
    record_failure(reason);
  }
  void record_method_not_compilable_all_tiers(const char* reason) {
    record_method_not_compilable(reason, true);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
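  // Bail-out sketch (hedged): long-running phases typically guard against
  // node explosion like this, using NodeLimitFudgeFactor as headroom:
  //
  //   if (C->check_node_count(NodeLimitFudgeFactor, "out of nodes during split")) {
  //     return;   // the compilation has been recorded as not compilable
  //   }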
  // Node management
  uint         unique() const              { return _unique; }
  uint         next_unique()               { return _unique++; }
  void         set_unique(uint i)          { _unique = i; }
  static int   debug_idx()                 { return debug_only(_debug_idx)+0; }
  static void  set_debug_idx(int i)        { debug_only(_debug_idx = i); }
  Arena*       node_arena()                { return &_node_arena; }
  Arena*       old_arena()                 { return &_old_arena; }
  RootNode*    root() const                { return _root; }
  void         set_root(RootNode* r)       { _root = r; }
  StartNode*   start() const;              // (Derived from root.)
  void         init_start(StartNode* s);
  Node*        immutable_memory();

  Node* recent_alloc_ctl() const           { return _recent_alloc_ctl; }
  Node* recent_alloc_obj() const           { return _recent_alloc_obj; }
  void  set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }
  void record_dead_node(uint idx) {
    if (_dead_node_list.test_set(idx)) return;
    _dead_node_count++;
  }
  bool is_dead_node(uint idx)     { return _dead_node_list.test(idx) != 0; }
  uint dead_node_count()          { return _dead_node_count; }
  void reset_dead_node_list() {
    _dead_node_list.Reset();
    _dead_node_count = 0;
  }
  uint live_nodes() const {
    int val = _unique - _dead_node_count;
    assert(val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique));
    return (uint) val;
  }
#ifdef ASSERT
  uint count_live_nodes_by_graph_walk();
  void print_missing_nodes();
#endif
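  // Accounting sketch (hedged): _dead_node_count exists so that
  // live_nodes() is O(1) instead of walking the VectorSet; n is a
  // hypothetical Node* being killed:
  //
  //   C->record_dead_node(n->_idx);   // idempotent per node index
  //   if (C->live_nodes() > (uint)LiveNodeCountInliningCutoff) { ... }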
  // Constant table
  ConstantTable& constant_table() { return _constant_table; }

  MachConstantBaseNode* mach_constant_base_node();
  bool                  has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool                  needs_clone_jvms();

  // Handy undefined Node
  Node* top() const                 { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node* cached_top_node()           { return _top; }
  void          set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const { return _default_node_notes; }
  void    set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes* node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena*  type_arena()                { return _type_arena; }
  Dict*   type_dict()                 { return _type_dict; }
  void*   type_hwm()                  { return _type_hwm; }
  size_t  type_last_size()            { return _type_last_size; }
  int     num_alias_types()           { return _num_alias_types; }

  void    init_type_arena()                       { _type_arena = &_Compile_types; }
  void    set_type_arena(Arena* a)                { _type_arena = a; }
  void    set_type_dict(Dict* d)                  { _type_dict = d; }
  void    set_type_hwm(void* p)                   { _type_hwm = p; }
  void    set_type_last_size(size_t sz)           { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }

  AliasType*        alias_type(int idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
  bool         have_alias_type(const TypePtr* adr_type);
  AliasType*        alias_type(ciField* field);

  int               get_alias_index(const TypePtr* at)  { return alias_type(at)->index(); }
  const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
  int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                   JVMState* jvms, bool allow_inline, float profile_factor,
                                   ciKlass* speculative_receiver_type = NULL,
                                   bool allow_intrinsics = true, bool delayed_forbidden = false);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms);
  }
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at call-site
  ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool &call_does_dispatch, int &vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at the given method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int      allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();
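  // Decision sketch (hedged): a parser typically consults trap history
  // before speculating on, say, a null check:
  //
  //   if (!C->too_many_traps(method(), bci, Deoptimization::Reason_null_check)) {
  //     // speculate: emit an uncommon trap on the slow path
  //   } else {
  //     // trapped too often already; generate the full check instead
  //   }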
  // Parsing, optimization
  PhaseGVN*         initial_gvn()               { return _initial_gvn; }
  Unique_Node_List* for_igvn()                  { return _for_igvn; }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
  void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
  void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void          identify_useful_nodes(Unique_Node_List &useful);
  void          update_dead_node_list(Unique_Node_List &useful);
  void          remove_useless_nodes (Unique_Node_List &useful);

  WarmCallInfo* warm_calls() const              { return _warm_calls; }
  void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void          add_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }

  void          prepend_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(0, cg);
  }

  void          add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void          add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);

  void dump_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      return live_nodes() > (uint)LiveNodeCountInliningCutoff;
    }
  }

  void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const     { return _number_of_mh_late_inlines > 0; }

  void inline_incrementally_one(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
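  // Queueing sketch (hedged, assuming the CallGenerator::for_late_inline
  // factory): a call that should be inlined after the main parse is
  // wrapped in a late-inline CallGenerator and queued:
  //
  //   CallGenerator* lcg = CallGenerator::for_late_inline(callee, cg);
  //   C->add_late_inline(lcg);   // revisited by inline_incrementally()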
  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                       { return _cfg; }
  bool              select_24_bit_instr() const { return _select_24_bit_instr; }
  bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()                   { return _matcher; }
  PhaseRegAlloc*    regalloc()                  { return _regalloc; }
  int               frame_slots() const         { return _frame_slots; }
  int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
  int               frame_size_in_bytes() const { return _frame_slots << LogBytesPerInt; }
  RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()            { return _indexSet_arena; }
  void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
  uint              node_bundling_limit()       { return _node_bundling_limit; }
  Bundle*           node_bundling_base()        { return _node_bundling_base; }
  void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
  void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
  bool          starts_bundle(const Node *n) const;
  bool          need_stack_bang(int frame_size_in_bytes) const;
  bool          need_register_stack_bang() const;

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }
  int bang_size_in_bytes() const;

  void          set_matcher(Matcher* m)               { _matcher = m; }
  //void        set_regalloc(PhaseRegAlloc* ra)         { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)          { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode   = mode;
  }

  void set_java_calls(int z)  { _java_calls  = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  // Instruction bits passed off to the VM
  int               code_size()                 { return _method_size; }
  CodeBuffer*       code_buffer()               { return &_code_buffer; }
  int               first_block_size()          { return _first_block_size; }
  void              set_frame_complete(int off) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
  ExceptionHandlerTable*  handler_table()       { return &_handler_table; }
  ImplicitExceptionTable* inc_table()           { return &_inc_table; }
  OopMapSet*        oop_map_set()               { return _oop_map_set; }
  DebugInformationRecorder* debug_info()        { return env()->debug_info(); }
  Dependencies*     dependencies()              { return env()->dependencies(); }
  static int        CompiledZap_count()         { return _CompiledZap_count; }
  BufferBlob*       scratch_buffer_blob()       { return _scratch_buffer_blob; }
  void          init_scratch_buffer_blob(int const_size);
  void          clear_scratch_buffer_blob();
  void          set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo*        scratch_locs_memory()       { return _scratch_locs_memory; }
  void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }

  // emit to scratch blob, report resulting size
  uint              scratch_emit_size(const Node* n);
  void          set_in_scratch_emit_size(bool x) {        _in_scratch_emit_size = x; }
  bool              in_scratch_emit_size() const { return _in_scratch_emit_size; }

  enum ScratchBufferBlob {
    MAX_inst_size       = 1024,
    MAX_locs_size       = 128, // number of relocInfo elements
    MAX_const_size      = 128,
    MAX_stubs_size      = 128
  };

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis,
          bool eliminate_boxing);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc);

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }
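  // Invocation sketch (hedged): the first constructor above runs the whole
  // pipeline; a compiler driver such as C2Compiler::compile_method invokes
  // it roughly like this (locals are illustrative):
  //
  //   Compile C(env, this, target, entry_bci,
  //             subsume_loads, do_escape_analysis, eliminate_boxing);
  //   // on return, C.failing() reports whether compilation bailed out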
  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Driver for converting compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node *n);
  bool valid_bundle_info(const Node *n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();

  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray( int idx, MachSafePointNode* sfpt,
                     Node *local, GrowableArray<ScopeValue*> *array,
                     GrowableArray<ScopeValue*> *objs );

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
  static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
                                     ObjectValue* sv );

  // Process an OopMap Element while emitting nodes
  void Process_OopMap_Node(MachNode *mach, int code_offset);

  // Initialize code buffer
  CodeBuffer* init_buffer(uint* blk_starts);

  // Write out basic block data to code buffer
  void fill_buffer(CodeBuffer* cb, uint* blk_starts);

  // Determine which variable sized branches can be shortened
  void shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size);

  // Compute the size of first NumberOfLoopInstrToAlign instructions
  // at the head of a loop.
  void compute_loop_first_inst_sizes();

  // Compute the information for the exception tables
  void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

#ifdef ENABLE_ZAP_DEAD_LOCALS
  static bool is_node_getting_a_safepoint(Node*);
  void Insert_zap_nodes();
  Node* call_zap_node(MachSafePointNode* n, int block_no);
#endif

 private:
  // Phase control:
  void Init(int aliaslevel);                     // Prepare for a single compilation
  int  Inline_Warm();                            // Find more inlining work.
  void Finish_Warm();                            // Give up on further inlines.
  void Optimize();                               // Given a graph, optimize it
  void Code_Gen();                               // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  void           register_library_intrinsics();                            // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor
  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
  void           register_intrinsic(CallGenerator* cg);                    // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
  void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
  void eliminate_redundant_card_marks(Node* n);

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked = 1,      // succeeded at least once
    _intrinsic_failed = 2,      // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,     // was seen in the virtual form (rare)
    _intrinsic_both = 16        // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return boolean if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;
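  // Bookkeeping sketch (hedged): an intrinsic attempt would update the
  // histogram with the flag bits above, e.g.:
  //
  //   bool first = gather_intrinsic_statistics(vmIntrinsics::_hashCode,
  //                                            /*is_virtual*/ true,
  //                                            _intrinsic_worked);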
  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // Verify GC barrier patterns
  void verify_barriers() PRODUCT_RETURN;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Dump formatted assembly
  void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
  void dump_pc(int *pcs, int pc_limit, Node *n);

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);

  // Auxiliary method for randomized fuzzing/stressing
  static bool randomized_select(int count);

  void shenandoah_eliminate_g1_wb_pre(Node* call, PhaseIterGVN* igvn);
};

#endif // SHARE_VM_OPTO_COMPILE_HPP