/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_PARSE_HPP
#define SHARE_VM_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public ResourceObj {
  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the percentage of the method's
  // total execution time used at this call site.
  const float _site_invoke_ratio;
  const int   _site_depth_adjust;
  float compute_callee_frequency(int caller_bci) const;

  GrowableArray<InlineTree*> _subtrees;
  friend class Compile;

 protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int site_depth_adjust);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
  const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
  const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
  void        print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const;

  InlineTree* caller_tree()  const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         inline_depth() const { return stack_depth() + _site_depth_adjust; }
  int         stack_depth()  const { return _caller_jvms ? _caller_jvms->depth() : 0; }
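
  // Illustrative arithmetic for the recursive scaling of _site_invoke_ratio
  // (hypothetical counts): a site invoked 20 times per 100 interpreter
  // invocations of its caller has a local ratio of 0.2; if the caller's own
  // _site_invoke_ratio is 0.5, the callee's scaled ratio is 0.2 * 0.5 = 0.1,
  // i.e. about 10% of the root method's execution reaches this call site.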

 public:
  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);

  // For temporary (stack-allocated, stateless) ilts:
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);

  // InlineTree enum
  enum InlineStyle {
    Inline_do_not_inline            = 0,
    Inline_cha_is_monomorphic       = 1,
    Inline_type_profile_monomorphic = 2
  };

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  WarmCallInfo* ok_to_inline(ciMethod* call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci);

  // Information about inlined method
  JVMState*   caller_jvms()       const { return _caller_jvms; }
  ciMethod*   method()            const { return _method; }
  int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint        count_inline_bcs()  const { return _count_inline_bcs; }
  float       site_invoke_ratio() const { return _site_invoke_ratio; }

#ifndef PRODUCT
 private:
  uint        _count_inlines;     // Count of inlined methods
 public:
  // Debug information collected during parse
  uint        count_inlines()     const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }
};
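
// Example of the intended use at a call site (a sketch only; locals such as
// root_ilt, callee, profile and initial_wci are placeholders, and the real
// sequence lives in the call-parsing code):
//
//   InlineTree*   ilt = InlineTree::find_subtree_from_root(root_ilt, jvms, callee);
//   WarmCallInfo* wci = ilt->ok_to_inline(callee, jvms, profile, initial_wci);
//   // "hot"  => inline immediately;
//   // "cold" => do not inline;
//   // otherwise the newly allocated info block may be enqueued for later.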

//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Include exception paths also.
    Block**            _successors;

    // Use init_node/init_graph to initialize Blocks.
    // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
    Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }

   public:

    // Set up the block data structure itself.
    void init_node(Parse* outer, int po);
    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const        { return _flow; }
    int pred_count() const                 { return _pred_count; }
    int preds_parsed() const               { return _preds_parsed; }
    bool is_parsed() const                 { return _is_parsed; }
    bool is_handler() const                { return _is_handler; }
    void set_count(uint x)                 { _count = x; }
    uint count() const                     { return _count; }

    SafePointNode* start_map() const       { assert(is_merged(), ""); return _start_map; }
    void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const                 { return _start_map != NULL; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const       { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != NULL && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const                  { return preds_parsed() == pred_count(); }

    int num_successors() const             { return _num_successors; }
    int all_successors() const             { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                      { return flow()->start(); }
    int limit() const                      { return flow()->limit(); }
    int rpo() const                        { return flow()->rpo(); }
    int start_sp() const                   { return flow()->stack_size(); }

    bool is_loop_head() const              { return flow()->is_loop_head(); }
    bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const   { assert(is_SEL_head(), ""); return is_invariant_local(i); }

    const Type* peek(int off = 0) const    { return stack_type_at(start_sp() - (off + 1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const        { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
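
    // For example (hypothetical block): with pred_count() == 3, three
    // successive calls to next_path_num() return 3, 2, and finally
    // 1 == PhiNode::Input, so the last predecessor merged claims the
    // first real phi/region input slot.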

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state(Bytecodes::Code bc);
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif
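
  // Per-bytecode protocol for the histogram (a sketch of the intended use;
  // the real calls sit in the block-parsing loop, compiled out in PRODUCT):
  //
  //   parse_histogram()->set_initial_state(bc());  // before one bytecode
  //   do_one_bytecode();
  //   parse_histogram()->record_change();          // after it is parsed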

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*       _caller;        // JVMS which carries incoming args & state.
  float           _expected_uses; // expected number of calls to this code
  float           _prof_factor;   // discount applied to my profile counts
  int             _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;            // My kind of function type
  int             _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*     _flow;          // Results of previous flow pass.
  Block*          _blocks;        // Array of basic-block structs.
  int             _block_count;   // Number of elements in _blocks.

  GraphKit        _exits;         // Record all normal returns and throws here.
  bool            _wrote_final;   // Did we write a final field?
  bool            _count_invocations;  // update and test invocation counter
  bool            _method_data_update; // update method data oop

  // Variables which track Java semantics during bytecode parsing:

  Block*           _block;        // block currently getting parsed
  ciBytecodeStream _iter;         // stream of this method's bytecodes

  int              _blocks_merged; // Progress meter: state merges from BB preds
  int              _blocks_parsed; // Progress meter: BBs actually parsed

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;          // Debugging SwitchRanges.
  int _est_switch_depth;          // Debugging SwitchRanges.
#endif

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

 public:
  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  //            entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  //            blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final()   const { return _wrote_final; }
  void          set_wrote_final(bool z) { _wrote_final = z; }
  bool          count_invocations()  const { return _count_invocations; }
  bool          method_data_update() const { return _method_data_update; }

  Block*            block() const { return _block; }
  ciBytecodeStream& iter()        { return _iter; }
  Bytecodes::Code   bc()    const { return _iter.cur_bc(); }

  void set_block(Block* b)        { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse()    const { return _entry_bci != InvocationEntryBci; }
  int  osr_bci()         const { assert(is_osr_parse(), ""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing() { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode*& bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path(int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
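
  // How the merge family fits together (a sketch; the real definitions are
  // in parse1.cpp): merge() resolves the target block and hands it a fresh
  // path number, roughly
  //
  //   Block* target = successor_for_bci(target_bci);
  //   merge_common(target, target->next_path_num());
  //
  // while merge_new_path() widens the target's region via add_new_path()
  // before merging.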

  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type** result2 = NULL);

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }
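
  // Example: a "goto" parsed at bci 42 with target_bci 10 is a back-branch
  // (10 <= 42), likely closing a loop, so a safepoint is inserted; a forward
  // branch to bci 50 would take none.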

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper function to identify inlining potential at call-site
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod* dest_method, const TypeOopPtr* receiver_type);

  // Helper function to setup for type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true);  }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true);  }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField* field, ciMethod* method);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // loading from a constant field or the constant pool
  // returns false if push failed (non-perm field constants only, not ldcs)
  bool push_constant(ciConstant con, bool require_constant = false);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float& cnt);
  float   branch_prediction(float& cnt, BoolTest::mask btest, int target_bci);
  bool    seems_never_taken(float prob);
  bool    seems_stable_comparison(BoolTest::mask btest, Node* c);

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                              Block* path, Block* other_path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
  Node*   jump_if_join(Node* iffalse, Node* iftrue);
  void    jump_if_true_fork (IfNode* ifNode, int dest_bci_if_true,  int prof_table_index);
  void    jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, int prof_table_index);
  void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);

  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

#endif // SHARE_VM_OPTO_PARSE_HPP