/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_PARSE_HPP
#define SHARE_VM_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public ResourceObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the fraction of the method's
  // total execution time spent at this call site.
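  // Illustrative example (not from the original source): if the enclosing
  // caller was itself inlined with a ratio of 0.5, and this call site
  // accounts for 10 of the caller's 20 recorded invocations, the recursively
  // scaled ratio stored here would be 0.5 * (10.0f / 20.0f) = 0.25.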
  const float _site_invoke_ratio;
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
  float compute_callee_frequency( int caller_bci ) const;

  GrowableArray<InlineTree*> _subtrees;

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int max_inline_level);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result,
                            bool& should_delay,
                            bool is_mh_inline);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                JVMState* jvms,
                                WarmCallInfo* wci_result);
  void        print_inlining(ciMethod* callee_method, int caller_bci,
                             ciMethod* caller_method, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         inline_level()      const { return stack_depth(); }
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // For temporary (stack-allocated, stateless) ilts:
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  WarmCallInfo* ok_to_inline(ciMethod* call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay, bool is_mh_inline);
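  // A rough caller-side sketch (hypothetical, for illustration only):
  //   WarmCallInfo* wci = ilt->ok_to_inline(callee, jvms, profile, &wci_buf,
  //                                         should_delay, /*is_mh_inline*/ false);
  //   A hot result means inline immediately; a cold result means do not
  //   inline; anything in between is a warm call that may be enqueued.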

  // Information about inlined method
  JVMState*   caller_jvms()       const { return _caller_jvms; }
  ciMethod   *method()            const { return _method; }
  int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint        count_inline_bcs()  const { return _count_inline_bcs; }
  float       site_invoke_ratio() const { return _site_invoke_ratio; }

#ifndef PRODUCT
private:
  uint        _count_inlines;     // Count of inlined methods
public:
  // Debug information collected during parse
  uint        count_inlines()     const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool        _forced_inline;     // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool        forced_inline()     const { return _forced_inline; }
  // Count number of nodes in this subtree
  int         count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out);
};


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Include exception paths also.
    Block**            _successors;

   public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const        { return _flow; }
    int pred_count() const                 { return _pred_count; }
    int preds_parsed() const               { return _preds_parsed; }
    bool is_parsed() const                 { return _is_parsed; }
    bool is_handler() const                { return _is_handler; }
    void set_count( uint x )               { _count = x; }
    uint count() const                     { return _count; }

    SafePointNode* start_map() const       { assert(is_merged(),""); return _start_map; }
    void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const                 { return _start_map != NULL; }
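    // Merge protocol, as implied by the accessors above (illustrative):
    // the first predecessor to reach the block calls set_start_map() with
    // its current JVM state; every later predecessor sees is_merged() and
    // folds its own state into the phis of the existing start map.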

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const       { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != NULL && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const                  { return preds_parsed() == pred_count(); }

    int num_successors() const             { return _num_successors; }
    int all_successors() const             { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                      { return flow()->start(); }
    int limit() const                      { return flow()->limit(); }
    int rpo() const                        { return flow()->rpo(); }
    int start_sp() const                   { return flow()->stack_size(); }

    bool is_loop_head() const              { return flow()->is_loop_head(); }
    bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const{ return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const  {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const   { assert(is_SEL_head(),""); return is_invariant_local(i); }

    const Type* peek(int off=0) const      { return stack_type_at(start_sp() - (off+1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const        { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
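    // For example (illustrative): with pred_count() == 3, three successive
    // calls to next_path_num() return 3, 2, 1 -- the final path number is
    // always PhiNode::Input (1), matching the comment above.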

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse   *_parser;
    Compile *_compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse *p, Compile *c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F);  // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif
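  // Expected recording sequence, per the comments above (a sketch; the
  // real call sites live in the parser's bytecode loop):
  //   parse_histogram()->set_initial_state(bc());  // before one bytecode
  //   do_one_bytecode();
  //   parse_histogram()->record_change();          // after, to log the delta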

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc*_tf;           // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;        // Did we write a final field?
  bool          _wrote_volatile;     // Did we write a volatile field?
  bool          _wrote_stable;       // Did we write a @Stable field?
  bool          _wrote_fields;       // Did we write any field?
  bool          _count_invocations;  // update and test invocation counter
  bool          _method_data_update; // update method data oop
  Node*         _alloc_with_final;   // An allocation node with final field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

  bool         _first_return;                  // true if return is the first to be parsed
  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint         _new_idx;                       // any node with an _idx above this was new during this parse; used to trim the replaced nodes list.

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  // entry_bci()  -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  // blocks()  -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const    { return _wrote_final; }
  void      set_wrote_final(bool z)    { _wrote_final = z; }
  bool          wrote_volatile() const { return _wrote_volatile; }
  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool          wrote_stable() const   { return _wrote_stable; }
  void      set_wrote_stable(bool z)   { _wrote_stable = z; }
  bool          wrote_fields() const   { return _wrote_fields; }
  void      set_wrote_fields(bool z)   { _wrote_fields = z; }
  bool          count_invocations() const  { return _count_invocations; }
  bool          method_data_update() const { return _method_data_update; }
  Node*         alloc_with_final() const   { return _alloc_with_final; }
  void      set_alloc_with_final(Node* n)  {
    assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
    _alloc_with_final = n;
  }

  Block*             block()    const { return _block; }
  ciBytecodeStream&  iter()           { return _iter; }
  Bytecodes::Code    bc()       const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const  { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse() const     { return _entry_bci != InvocationEntryBci; }
  int osr_bci() const           { assert(is_osr_parse(),""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing()                { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(          int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi(       int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
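
  // Illustrative sketch of how these helpers compose (simplified, assumed):
  //   Block* target = successor_for_bci(target_bci);
  //   int pnum = target->next_path_num();  // which phi input this edge feeds
  //   merge_common(target, pnum);          // unify current map into target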

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);

  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);

  // Helper function to setup for type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void  do_checkcast();
  void  do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField *field, ciMethod *method);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
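  // Illustrative reading (assumed, simplified): both return an estimated
  // probability that the branch is taken, derived from profile data; e.g.
  // a site profiled as taken 90 times and not taken 10 times predicts
  // roughly 0.9.  The cnt out-parameter receives an execution-count
  // estimate for the branch.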
  bool    seems_never_taken(float prob) const;
  bool    path_is_suitable_for_uncommon_trap(float prob) const;
  bool    seems_stable_comparison() const;

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                              Block* path, Block* other_path);
  void    sharpen_type_after_if(BoolTest::mask btest,
                                Node* con, const Type* tcon,
                                Node* val, const Type* tval);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
  Node*   jump_if_join(Node* iffalse, Node* iftrue);
  void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);
  void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index);
  void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);

  void decrement_age();
  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);
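  // For example (illustrative): if profiling speculates that a reference is
  // always an instance of one concrete class, a pointer comparison on it can
  // sometimes be sharpened or folded using that speculative type, guarded by
  // a deoptimization check.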

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

#endif // SHARE_VM_OPTO_PARSE_HPP