/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PARSE_HPP
#define SHARE_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public ResourceObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the percentage of the method's
  // total execution time used at this call site.
  const float _site_invoke_ratio;
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
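  // Editorial example (not part of the original header), assuming the
  // recursive scaling noted above is multiplicative along the inline tree:
  // if a call site is reached on ~30% of its caller's invocations, and the
  // caller itself accounts for 50% of the root method's time, the site's
  // ratio is 0.3 * 0.5 = 0.15.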
  float compute_callee_frequency( int caller_bci ) const;

  GrowableArray<InlineTree*> _subtrees;

  bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method);

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int max_inline_level);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                JVMState* jvms,
                                WarmCallInfo* wci_result);
  void        print_inlining(ciMethod* callee_method, int caller_bci,
                             ciMethod* caller_method, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         inline_level()      const { return stack_depth(); }
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
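  // Editorial note (not in the original source): since stack_depth() is the
  // depth of the caller's JVMState chain, the root compilation reports
  // inline_level() == 0 and each inlined caller frame adds one level.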
  const char* msg() const         { return _msg; }
  void        set_msg(const char* msg) { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // For temporary (stack-allocated, stateless) ilts:
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  WarmCallInfo* ok_to_inline(ciMethod* call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);

  // Information about inlined method
  JVMState* caller_jvms()         const { return _caller_jvms; }
  ciMethod* method()              const { return _method; }
  int       caller_bci()          const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint      count_inline_bcs()    const { return _count_inline_bcs; }
  float     site_invoke_ratio()   const { return _site_invoke_ratio; }

#ifndef PRODUCT
private:
  uint      _count_inlines;       // Count of inlined methods
public:
  // Debug information collected during parse
  uint      count_inlines()       const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool      _forced_inline;       // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool      forced_inline()       const { return _forced_inline; }
  // Count number of nodes in this subtree
  int       count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out);
};


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
    bool               _has_predicates; // Were predicates added before parsing of the loop head?

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Includes exception paths also.
    Block**            _successors;

   public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const      { return _flow; }
    int pred_count() const               { return _pred_count; }
    int preds_parsed() const             { return _preds_parsed; }
    bool is_parsed() const               { return _is_parsed; }
    bool is_handler() const              { return _is_handler; }
    void set_count( uint x )             { _count = x; }
    uint count() const                   { return _count; }

    SafePointNode* start_map() const     { assert(is_merged(), "");  return _start_map; }
    void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const               { return _start_map != NULL; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const     { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != NULL && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const                { return preds_parsed() == pred_count(); }

    bool has_predicates() const          { return _has_predicates; }
    void set_has_predicates()            { _has_predicates = true; }

    int num_successors() const           { return _num_successors; }
    int all_successors() const           { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                    { return flow()->start(); }
    int limit() const                    { return flow()->limit(); }
    int rpo() const                      { return flow()->rpo(); }
    int start_sp() const                 { return flow()->stack_size(); }

    bool is_loop_head() const            { return flow()->is_loop_head(); }
    bool is_SEL_head() const             { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(), ""); return is_invariant_local(i); }

    const Type* peek(int off = 0) const  { return stack_type_at(start_sp() - (off + 1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const      { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
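    // Editorial example (not part of the original header): for a block with
    // pred_count() == 3, successive calls to next_path_num() return 3, 2, 1;
    // the last value equals PhiNode::Input, so region/phi inputs are filled
    // from the highest index downward as predecessors are parsed:
    //   b->next_path_num();  // returns 3
    //   b->next_path_num();  // returns 2
    //   b->next_path_num();  // returns 1 == PhiNode::Input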
    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, selected by the global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;          // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;   // Did we write a final field?
  bool          _wrote_volatile; // Did we write a volatile field?
  bool          _wrote_stable;  // Did we write a @Stable field?
  bool          _wrote_fields;  // Did we write any field?
  bool          _count_invocations; // update and test invocation counter
  bool          _method_data_update; // update method data oop
  Node*         _alloc_with_final; // An allocation node with final field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

  bool         _first_return;   // true if return is the first to be parsed
  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint         _new_idx;        // any node with _idx above this value was new during this parse;
                                // used to trim the list of replaced nodes

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  //            entry_bci()  -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  //            blocks()     -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const   { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          wrote_volatile() const { return _wrote_volatile; }
  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool          wrote_stable() const  { return _wrote_stable; }
  void      set_wrote_stable(bool z)  { _wrote_stable = z; }
  bool          wrote_fields() const  { return _wrote_fields; }
  void      set_wrote_fields(bool z)  { _wrote_fields = z; }
  bool          count_invocations() const { return _count_invocations; }
  bool          method_data_update() const { return _method_data_update; }
  Node*         alloc_with_final() const { return _alloc_with_final; }
  void      set_alloc_with_final(Node* n) {
    assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
    _alloc_with_final = n;
  }

  Block*             block()    const { return _block; }
  ciBytecodeStream&  iter()           { return _iter; }
  Bytecodes::Code    bc()       const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const  { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse() const     { return _entry_bci != InvocationEntryBci; }
  int osr_bci() const           { assert(is_osr_parse(), ""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing()                { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, const Type* type, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path(int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
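  // Editorial sketch (not in the original source): conceptually, merging the
  // current JVM state into a target block wires the parser's control into
  // input pnum of the target's Region node and each live local/stack value
  // into input pnum of the matching Phi, creating phis on demand via
  // ensure_phi()/ensure_memory_phi() when the incoming values differ.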
  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // Helper function to generate array store check
  Node* array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type** result2 = NULL);

  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to set up Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper function to set up for type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField* field, ciMethod* method);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  void do_defaultvalue();
  void do_withfield();
  void do_newarray(BasicType elemtype);
  void do_newarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
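  // Editorial note (not in the original source): branch_prediction() returns
  // an estimated taken-probability for the branch, derived from profile data
  // when available; seems_never_taken() and path_is_suitable_for_uncommon_trap()
  // below use such estimates to decide whether a never-taken path can be
  // replaced by an uncommon trap instead of compiled code.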
  bool    seems_never_taken(float prob) const;
  bool    path_is_suitable_for_uncommon_trap(float prob) const;
  bool    seems_stable_comparison() const;

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c, bool new_path = false, Node** ctrl_taken = NULL);
  void    do_acmp(BoolTest::mask btest, Node* a, Node* b);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
  void    sharpen_type_after_if(BoolTest::mask btest,
                                Node* con, const Type* tcon,
                                Node* val, const Type* tval);
  void    maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  Node*   jump_if_join(Node* iffalse, Node* iftrue);
  void    jump_if_true_fork(IfNode* ifNode, int dest_bci_if_true, int prof_table_index, bool unc);
  void    jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, int prof_table_index, bool unc);
  void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index, bool unc);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  void decrement_age();
  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);
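  // Editorial note (not in the original source): per the comments above,
  // do_exceptions() routes each pending exception state either into a local
  // handler (via catch_inline_exceptions(), which dispatches on the exception
  // type) or, when no handler applies, out of the method via throw_to_exit().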
  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

#endif // SHARE_OPTO_PARSE_HPP