< prev index next >

src/share/vm/opto/parse1.cpp

Print this page
rev 10293 : 8150720: Cleanup code around PrintOptoStatistics
Reviewed-by: kvn, shade, vlivanov


  28 #include "oops/method.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/c2compiler.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/idealGraphPrinter.hpp"
  33 #include "opto/locknode.hpp"
  34 #include "opto/memnode.hpp"
  35 #include "opto/opaquenode.hpp"
  36 #include "opto/parse.hpp"
  37 #include "opto/rootnode.hpp"
  38 #include "opto/runtime.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "utilities/copy.hpp"
  43 
  44 // Static array so we can figure out which bytecodes stop us from compiling
  45 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
  46 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  47 

  48 int nodes_created              = 0;  // total ideal-graph Nodes constructed while parsing (printed in print_statistics)
  49 int methods_parsed             = 0;  // methods actually parsed (incremented after the OSR/type-flow checks below)
  50 int methods_seen               = 0;  // methods presented to the parser (incremented unconditionally in the Parse ctor)
  51 int blocks_parsed              = 0;  // basic blocks successfully parsed in do_all_blocks
  52 int blocks_seen                = 0;  // all blocks of parsed methods, including dead ones (block_count() per method)
  53 
  54 int explicit_null_checks_inserted = 0;  // explicit NULL checks emitted by the parser
  55 int explicit_null_checks_elided   = 0;  // explicit checks proven unnecessary and dropped
  56 int all_null_checks_found         = 0, implicit_null_checks              = 0;  // checks surviving the optimizer / those folded into memory ops -- presumably updated in bytecodeInfo.cpp per the comment above; not visible here
  57 int implicit_null_throws          = 0;  // implicit checks that actually trapped at runtime -- NOTE(review): the new revision reads SharedRuntime::_implicit_null_throws instead of this shadow counter
  58 
  59 int reclaim_idx  = 0;  // NOTE(review): reclaim_* are never referenced in the visible code; removed by this cleanup changeset
  60 int reclaim_in   = 0;
  61 int reclaim_node = 0;
  62 
  63 #ifndef PRODUCT
  64 bool Parse::BytecodeParseHistogram::_initialized = false;  // lazily flipped by the histogram's init path (not visible here); gates printing below
  65 uint Parse::BytecodeParseHistogram::_bytecodes_parsed [Bytecodes::number_of_codes];  // per-bytecode: how many times each opcode was parsed
  66 uint Parse::BytecodeParseHistogram::_nodes_constructed[Bytecodes::number_of_codes];  // per-bytecode: Nodes constructed while parsing that opcode
  67 uint Parse::BytecodeParseHistogram::_nodes_transformed[Bytecodes::number_of_codes];  // per-bytecode: Nodes transformed by GVN
  68 uint Parse::BytecodeParseHistogram::_new_values       [Bytecodes::number_of_codes];  // per-bytecode: new values created -- exact semantics in the histogram impl, not visible here
  69 #endif
  70 
  71 //------------------------------print_statistics-------------------------------
  72 #ifndef PRODUCT
  73 void Parse::print_statistics() {  // dump the parser counters above; called at VM exit when PrintOptoStatistics is set -- TODO confirm caller, not visible here
  74   tty->print_cr("--- Compiler Statistics ---");
  75   tty->print("Methods seen: %d  Methods parsed: %d", methods_seen, methods_parsed);
  76   tty->print("  Nodes created: %d", nodes_created);
  77   tty->cr();
  78   if (methods_seen != methods_parsed)
  79     tty->print_cr("Reasons for parse failures (NOT cumulative):");  // NOTE(review): braceless if; header printed but no reasons follow here
  96 
  97 //------------------------------ON STACK REPLACEMENT---------------------------
  98 
  99 // Construct a node which can be used to get incoming state for
 100 // on stack replacement.
 101 Node *Parse::fetch_interpreter_state(int index,
 102                                      BasicType bt,
 103                                      Node *local_addrs,
 104                                      Node *local_addrs_base) {
 105   Node *mem = memory(Compile::AliasIdxRaw);
 106   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 107   Node *ctl = control();
 108 
 109   // Very similar to LoadNode::make, except we handle un-aligned longs and
 110   // doubles on Sparc.  Intel can handle them just fine directly.
 111   Node *l = NULL;


 478   // Accumulate total sum of decompilations, also.
 479   C->set_decompile_count(C->decompile_count() + md->decompile_count());
 480 
 481   _count_invocations = C->do_count_invocations();
 482   _method_data_update = C->do_method_data_update();
 483 
 484   if (log != NULL && method()->has_exception_handlers()) {
 485     log->elem("observe that='has_exception_handlers'");
 486   }
 487 
 488   assert(method()->can_be_compiled(),       "Can not parse this method, cutout earlier");
 489   assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
 490 
 491   // Always register dependence if JVMTI is enabled, because
 492   // either breakpoint setting or hotswapping of methods may
 493   // cause deoptimization.
 494   if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
 495     C->dependencies()->assert_evol_method(method());
 496   }
 497 
 498   methods_seen++;
 499 
 500   // Do some special top-level things.
 501   if (depth() == 1 && C->is_osr_compilation()) {
 502     _entry_bci = C->entry_bci();
 503     _flow = method()->get_osr_flow_analysis(osr_bci());
 504     if (_flow->failing()) {
 505       C->record_method_not_compilable(_flow->failure_reason());
 506 #ifndef PRODUCT
 507       if (PrintOpto && (Verbose || WizardMode)) {
 508         tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
 509         if (Verbose) {
 510           method()->print();
 511           method()->print_codes();
 512           _flow->print();
 513         }
 514       }
 515 #endif
 516     }
 517     _tf = C->tf();     // the OSR entry type is different
 518   }
 519 
 520 #ifdef ASSERT
 521   if (depth() == 1) {
 522     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
 523     if (C->tf() != tf()) {
 524       MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
 525       assert(C->env()->system_dictionary_modification_counter_changed(),
 526              "Must invalidate if TypeFuncs differ");
 527     }
 528   } else {
 529     assert(!this->is_osr_parse(), "no recursive OSR");
 530   }
 531 #endif
 532 
 533   methods_parsed++;
 534 #ifndef PRODUCT

 535   // add method size here to guarantee that inlined methods are added too
 536   if (CITime)
 537     _total_bytes_compiled += method()->code_size();
 538 
 539   show_parse_info();
 540 #endif
 541 
 542   if (failing()) {
 543     if (log)  log->done("parse");
 544     return;
 545   }
 546 
 547   gvn().set_type(root(), root()->bottom_type());
 548   gvn().transform(top());
 549 
 550   // Import the results of the ciTypeFlow.
 551   init_blocks();
 552 
 553   // Merge point for all normal exits
 554   build_exits();


 635   while (true) {
 636     bool progress = false;
 637     for (int rpo = 0; rpo < block_count(); rpo++) {
 638       Block* block = rpo_at(rpo);
 639 
 640       if (block->is_parsed()) continue;
 641 
 642       if (!block->is_merged()) {
 643         // Dead block, no state reaches this block
 644         continue;
 645       }
 646 
 647       // Prepare to parse this block.
 648       load_state_from(block);
 649 
 650       if (stopped()) {
 651         // Block is dead.
 652         continue;
 653       }
 654 
 655       blocks_parsed++;
 656 
 657       progress = true;
 658       if (block->is_loop_head() || block->is_handler() || has_irreducible && !block->is_ready()) {
 659         // Not all preds have been parsed.  We must build phis everywhere.
 660         // (Note that dead locals do not get phis built, ever.)
 661         ensure_phis_everywhere();
 662 
 663         if (block->is_SEL_head() &&
 664             (UseLoopPredicate || LoopLimitCheck)) {
 665           // Add predicate to single entry (not irreducible) loop head.
 666           assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
 667           // Need correct bci for predicate.
 668           // It is fine to set it here since do_one_block() will set it anyway.
 669           set_parse_bci(block->start());
 670           add_predicate();
 671           // Add new region for back branches.
 672           int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
 673           RegionNode *r = new RegionNode(edges+1);
 674           _gvn.set_type(r, Type::CONTROL);
 675           record_for_igvn(r);


 695           tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx);
 696         }
 697         if (result != top()) {
 698           record_for_igvn(result);
 699         }
 700       }
 701 
 702       // Parse the block.
 703       do_one_block();
 704 
 705       // Check for bailouts.
 706       if (failing())  return;
 707     }
 708 
 709     // with irreducible loops multiple passes might be necessary to parse everything
 710     if (!has_irreducible || !progress) {
 711       break;
 712     }
 713   }
 714 

 715   blocks_seen += block_count();
 716 
 717 #ifndef PRODUCT
 718   // Make sure there are no half-processed blocks remaining.
 719   // Every remaining unprocessed block is dead and may be ignored now.
 720   for (int rpo = 0; rpo < block_count(); rpo++) {
 721     Block* block = rpo_at(rpo);
 722     if (!block->is_parsed()) {
 723       if (TraceOptoParse) {
 724         tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
 725       }
 726       assert(!block->is_merged(), "no half-processed blocks");
 727     }
 728   }
 729 #endif
 730 }
 731 
 732 //-------------------------------build_exits----------------------------------
 733 // Build normal and exceptional exit merge points.
 734 void Parse::build_exits() {
 735   // make a clone of caller to prevent sharing of side-effects
 736   _exits.set_map(_exits.clone_map());
 737   _exits.clean_stack(_exits.sp());


1429 
1430 
1431 //------------------------------do_one_block-----------------------------------
1432 void Parse::do_one_block() {
1433   if (TraceOptoParse) {
1434     Block *b = block();
1435     int ns = b->num_successors();
1436     int nt = b->all_successors();
1437 
1438     tty->print("Parsing block #%d at bci [%d,%d), successors: ",
1439                   block()->rpo(), block()->start(), block()->limit());
1440     for (int i = 0; i < nt; i++) {
1441       tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo());
1442     }
1443     if (b->is_loop_head()) tty->print("  lphd");
1444     tty->cr();
1445   }
1446 
1447   assert(block()->is_merged(), "must be merged before being parsed");
1448   block()->mark_parsed();
1449   ++_blocks_parsed;
1450 
1451   // Set iterator to start of block.
1452   iter().reset_to_bci(block()->start());
1453 
1454   CompileLog* log = C->log();
1455 
1456   // Parse bytecodes
1457   while (!stopped() && !failing()) {
1458     iter().next();
1459 
1460     // Learn the current bci from the iterator:
1461     set_parse_bci(iter().cur_bci());
1462 
1463     if (bci() == block()->limit()) {
1464       // Do not walk into the next block until directed by do_all_blocks.
1465       merge(bci());
1466       break;
1467     }
1468     assert(bci() < block()->limit(), "bci still in block");
1469 


1578 
1579 //--------------------------merge_common---------------------------------------
1580 void Parse::merge_common(Parse::Block* target, int pnum) {
1581   if (TraceOptoParse) {
1582     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1583   }
1584 
1585   // Zap extra stack slots to top
1586   assert(sp() == target->start_sp(), "");
1587   clean_stack(sp());
1588 
1589   if (!target->is_merged()) {   // No prior mapping at this bci
1590     if (TraceOptoParse) { tty->print(" with empty state");  }
1591 
1592     // If this path is dead, do not bother capturing it as a merge.
1593     // It is "as if" we had 1 fewer predecessors from the beginning.
1594     if (stopped()) {
1595       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1596       return;
1597     }
1598 
1599     // Record that a new block has been merged.
1600     ++_blocks_merged;
1601 
1602     // Make a region if we know there are multiple or unpredictable inputs.
1603     // (Also, if this is a plain fall-through, we might see another region,
1604     // which must not be allowed into this block's map.)
1605     if (pnum > PhiNode::Input         // Known multiple inputs.
1606         || target->is_handler()       // These have unpredictable inputs.
1607         || target->is_loop_head()     // Known multiple inputs
1608         || control()->is_Region()) {  // We must hide this guy.
1609 
1610       int current_bci = bci();
1611       set_parse_bci(target->start()); // Set target bci
1612       if (target->is_SEL_head()) {
1613         DEBUG_ONLY( target->mark_merged_backedge(block()); )
1614         if (target->start() == 0) {
1615           // Add loop predicate for the special case when
1616           // there are backbranches to the method entry.
1617           add_predicate();
1618         }
1619       }
1620       // Add a Region to start the new basic block.  Phis will be added




  28 #include "oops/method.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/c2compiler.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/idealGraphPrinter.hpp"
  33 #include "opto/locknode.hpp"
  34 #include "opto/memnode.hpp"
  35 #include "opto/opaquenode.hpp"
  36 #include "opto/parse.hpp"
  37 #include "opto/rootnode.hpp"
  38 #include "opto/runtime.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "utilities/copy.hpp"
  43 
  44 // Static array so we can figure out which bytecodes stop us from compiling
  45 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
  46 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  47 
  48 #ifndef PRODUCT
  49 int nodes_created              = 0;  // total ideal-graph Nodes constructed while parsing (printed in print_statistics)
  50 int methods_parsed             = 0;  // methods actually parsed; bumped via NOT_PRODUCT(methods_parsed++) below
  51 int methods_seen               = 0;  // methods presented to the parser; bumped via NOT_PRODUCT(methods_seen++) in the Parse ctor
  52 int blocks_parsed              = 0;  // basic blocks successfully parsed in do_all_blocks
  53 int blocks_seen                = 0;  // all blocks of parsed methods, including dead ones (block_count() per method)
  54 
  55 int explicit_null_checks_inserted = 0;  // explicit NULL checks emitted by the parser
  56 int explicit_null_checks_elided   = 0;  // explicit checks proven unnecessary and dropped
  57 int all_null_checks_found         = 0;  // checks surviving the optimizer -- presumably updated in bytecodeInfo.cpp per the header comment; not visible here
  58 int implicit_null_checks          = 0;  // checks folded into memory operations




  59 

  60 bool Parse::BytecodeParseHistogram::_initialized = false;  // lazily flipped by the histogram's init path (not visible here); gates printing below
  61 uint Parse::BytecodeParseHistogram::_bytecodes_parsed [Bytecodes::number_of_codes];  // per-bytecode: how many times each opcode was parsed
  62 uint Parse::BytecodeParseHistogram::_nodes_constructed[Bytecodes::number_of_codes];  // per-bytecode: Nodes constructed while parsing that opcode
  63 uint Parse::BytecodeParseHistogram::_nodes_transformed[Bytecodes::number_of_codes];  // per-bytecode: Nodes transformed by GVN
  64 uint Parse::BytecodeParseHistogram::_new_values       [Bytecodes::number_of_codes];  // per-bytecode: new values created -- exact semantics in the histogram impl, not visible here

  65 
  66 //------------------------------print_statistics-------------------------------

  67 void Parse::print_statistics() {  // dump the parser counters above; non-product only (inside the #ifndef PRODUCT opened before the counters)
  68   tty->print_cr("--- Compiler Statistics ---");
  69   tty->print("Methods seen: %d  Methods parsed: %d", methods_seen, methods_parsed);
  70   tty->print("  Nodes created: %d", nodes_created);
  71   tty->cr();
  72   if (methods_seen != methods_parsed) {
  73     tty->print_cr("Reasons for parse failures (NOT cumulative):");
  74   }
  75   tty->print_cr("Blocks parsed: %d  Blocks seen: %d", blocks_parsed, blocks_seen);
  76 
  77   if (explicit_null_checks_inserted) {  // guard: avoids divide-by-zero in the percentage below
  78     tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,",
  79                   explicit_null_checks_inserted, explicit_null_checks_elided,
  80                   (100*explicit_null_checks_elided)/explicit_null_checks_inserted,
  81                   all_null_checks_found);
  82   }
  83   if (all_null_checks_found) {  // guard: avoids divide-by-zero in the percentage below
  84     tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
  85                   (100*implicit_null_checks)/all_null_checks_found);
  86   }
  87   if (SharedRuntime::_implicit_null_throws) {  // reads the runtime's own counter; replaces the old file-local implicit_null_throws shadow
  88     tty->print_cr("%d implicit null exceptions at runtime",
  89                   SharedRuntime::_implicit_null_throws);
  90   }
  91 
  92   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
  93     BytecodeParseHistogram::print();
  94   }
  95 }
  96 #endif
  97 
  98 //------------------------------ON STACK REPLACEMENT---------------------------
  99 
 100 // Construct a node which can be used to get incoming state for
 101 // on stack replacement.
 102 Node *Parse::fetch_interpreter_state(int index,
 103                                      BasicType bt,
 104                                      Node *local_addrs,
 105                                      Node *local_addrs_base) {
 106   Node *mem = memory(Compile::AliasIdxRaw);
 107   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 108   Node *ctl = control();
 109 
 110   // Very similar to LoadNode::make, except we handle un-aligned longs and
 111   // doubles on Sparc.  Intel can handle them just fine directly.
 112   Node *l = NULL;


 479   // Accumulate total sum of decompilations, also.
 480   C->set_decompile_count(C->decompile_count() + md->decompile_count());
 481 
 482   _count_invocations = C->do_count_invocations();
 483   _method_data_update = C->do_method_data_update();
 484 
 485   if (log != NULL && method()->has_exception_handlers()) {
 486     log->elem("observe that='has_exception_handlers'");
 487   }
 488 
 489   assert(method()->can_be_compiled(),       "Can not parse this method, cutout earlier");
 490   assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
 491 
 492   // Always register dependence if JVMTI is enabled, because
 493   // either breakpoint setting or hotswapping of methods may
 494   // cause deoptimization.
 495   if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
 496     C->dependencies()->assert_evol_method(method());
 497   }
 498 
 499   NOT_PRODUCT(methods_seen++);
 500 
 501   // Do some special top-level things.
 502   if (depth() == 1 && C->is_osr_compilation()) {
 503     _entry_bci = C->entry_bci();
 504     _flow = method()->get_osr_flow_analysis(osr_bci());
 505     if (_flow->failing()) {
 506       C->record_method_not_compilable(_flow->failure_reason());
 507 #ifndef PRODUCT
 508       if (PrintOpto && (Verbose || WizardMode)) {
 509         tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
 510         if (Verbose) {
 511           method()->print();
 512           method()->print_codes();
 513           _flow->print();
 514         }
 515       }
 516 #endif
 517     }
 518     _tf = C->tf();     // the OSR entry type is different
 519   }
 520 
 521 #ifdef ASSERT
 522   if (depth() == 1) {
 523     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
 524     if (C->tf() != tf()) {
 525       MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
 526       assert(C->env()->system_dictionary_modification_counter_changed(),
 527              "Must invalidate if TypeFuncs differ");
 528     }
 529   } else {
 530     assert(!this->is_osr_parse(), "no recursive OSR");
 531   }
 532 #endif
 533 

 534 #ifndef PRODUCT
 535   methods_parsed++;
 536   // add method size here to guarantee that inlined methods are added too
 537   if (CITime)
 538     _total_bytes_compiled += method()->code_size();
 539 
 540   show_parse_info();
 541 #endif
 542 
 543   if (failing()) {
 544     if (log)  log->done("parse");
 545     return;
 546   }
 547 
 548   gvn().set_type(root(), root()->bottom_type());
 549   gvn().transform(top());
 550 
 551   // Import the results of the ciTypeFlow.
 552   init_blocks();
 553 
 554   // Merge point for all normal exits
 555   build_exits();


 636   while (true) {
 637     bool progress = false;
 638     for (int rpo = 0; rpo < block_count(); rpo++) {
 639       Block* block = rpo_at(rpo);
 640 
 641       if (block->is_parsed()) continue;
 642 
 643       if (!block->is_merged()) {
 644         // Dead block, no state reaches this block
 645         continue;
 646       }
 647 
 648       // Prepare to parse this block.
 649       load_state_from(block);
 650 
 651       if (stopped()) {
 652         // Block is dead.
 653         continue;
 654       }
 655 
 656       NOT_PRODUCT(blocks_parsed++);
 657 
 658       progress = true;
 659       if (block->is_loop_head() || block->is_handler() || has_irreducible && !block->is_ready()) {
 660         // Not all preds have been parsed.  We must build phis everywhere.
 661         // (Note that dead locals do not get phis built, ever.)
 662         ensure_phis_everywhere();
 663 
 664         if (block->is_SEL_head() &&
 665             (UseLoopPredicate || LoopLimitCheck)) {
 666           // Add predicate to single entry (not irreducible) loop head.
 667           assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
 668           // Need correct bci for predicate.
 669           // It is fine to set it here since do_one_block() will set it anyway.
 670           set_parse_bci(block->start());
 671           add_predicate();
 672           // Add new region for back branches.
 673           int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
 674           RegionNode *r = new RegionNode(edges+1);
 675           _gvn.set_type(r, Type::CONTROL);
 676           record_for_igvn(r);


 696           tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx);
 697         }
 698         if (result != top()) {
 699           record_for_igvn(result);
 700         }
 701       }
 702 
 703       // Parse the block.
 704       do_one_block();
 705 
 706       // Check for bailouts.
 707       if (failing())  return;
 708     }
 709 
 710     // with irreducible loops multiple passes might be necessary to parse everything
 711     if (!has_irreducible || !progress) {
 712       break;
 713     }
 714   }
 715 
 716 #ifndef PRODUCT
 717   blocks_seen += block_count();
 718 

 719   // Make sure there are no half-processed blocks remaining.
 720   // Every remaining unprocessed block is dead and may be ignored now.
 721   for (int rpo = 0; rpo < block_count(); rpo++) {
 722     Block* block = rpo_at(rpo);
 723     if (!block->is_parsed()) {
 724       if (TraceOptoParse) {
 725         tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
 726       }
 727       assert(!block->is_merged(), "no half-processed blocks");
 728     }
 729   }
 730 #endif
 731 }
 732 
 733 //-------------------------------build_exits----------------------------------
 734 // Build normal and exceptional exit merge points.
 735 void Parse::build_exits() {
 736   // make a clone of caller to prevent sharing of side-effects
 737   _exits.set_map(_exits.clone_map());
 738   _exits.clean_stack(_exits.sp());


1430 
1431 
1432 //------------------------------do_one_block-----------------------------------
1433 void Parse::do_one_block() {
1434   if (TraceOptoParse) {
1435     Block *b = block();
1436     int ns = b->num_successors();
1437     int nt = b->all_successors();
1438 
1439     tty->print("Parsing block #%d at bci [%d,%d), successors: ",
1440                   block()->rpo(), block()->start(), block()->limit());
1441     for (int i = 0; i < nt; i++) {
1442       tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo());
1443     }
1444     if (b->is_loop_head()) tty->print("  lphd");
1445     tty->cr();
1446   }
1447 
1448   assert(block()->is_merged(), "must be merged before being parsed");
1449   block()->mark_parsed();

1450 
1451   // Set iterator to start of block.
1452   iter().reset_to_bci(block()->start());
1453 
1454   CompileLog* log = C->log();
1455 
1456   // Parse bytecodes
1457   while (!stopped() && !failing()) {
1458     iter().next();
1459 
1460     // Learn the current bci from the iterator:
1461     set_parse_bci(iter().cur_bci());
1462 
1463     if (bci() == block()->limit()) {
1464       // Do not walk into the next block until directed by do_all_blocks.
1465       merge(bci());
1466       break;
1467     }
1468     assert(bci() < block()->limit(), "bci still in block");
1469 


1578 
1579 //--------------------------merge_common---------------------------------------
1580 void Parse::merge_common(Parse::Block* target, int pnum) {
1581   if (TraceOptoParse) {
1582     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1583   }
1584 
1585   // Zap extra stack slots to top
1586   assert(sp() == target->start_sp(), "");
1587   clean_stack(sp());
1588 
1589   if (!target->is_merged()) {   // No prior mapping at this bci
1590     if (TraceOptoParse) { tty->print(" with empty state");  }
1591 
1592     // If this path is dead, do not bother capturing it as a merge.
1593     // It is "as if" we had 1 fewer predecessors from the beginning.
1594     if (stopped()) {
1595       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1596       return;
1597     }



1598 
1599     // Make a region if we know there are multiple or unpredictable inputs.
1600     // (Also, if this is a plain fall-through, we might see another region,
1601     // which must not be allowed into this block's map.)
1602     if (pnum > PhiNode::Input         // Known multiple inputs.
1603         || target->is_handler()       // These have unpredictable inputs.
1604         || target->is_loop_head()     // Known multiple inputs
1605         || control()->is_Region()) {  // We must hide this guy.
1606 
1607       int current_bci = bci();
1608       set_parse_bci(target->start()); // Set target bci
1609       if (target->is_SEL_head()) {
1610         DEBUG_ONLY( target->mark_merged_backedge(block()); )
1611         if (target->start() == 0) {
1612           // Add loop predicate for the special case when
1613           // there are backbranches to the method entry.
1614           add_predicate();
1615         }
1616       }
1617       // Add a Region to start the new basic block.  Phis will be added


< prev index next >