src/share/vm/opto/parse2.cpp

rev 7387 : 8063137: Never-taken branches should be pruned when GWT LambdaForms are shared
Reviewed-by: ?

*** 35,44 ****
--- 35,45 ----
  #include "opto/divnode.hpp"
  #include "opto/idealGraphPrinter.hpp"
  #include "opto/matcher.hpp"
  #include "opto/memnode.hpp"
  #include "opto/mulnode.hpp"
+ #include "opto/opaquenode.hpp"
  #include "opto/parse.hpp"
  #include "opto/runtime.hpp"
  #include "runtime/deoptimization.hpp"
  #include "runtime/sharedRuntime.hpp"
*** 765,796 ****
  //--------------------------dynamic_branch_prediction--------------------------
  // Try to gather dynamic branch prediction behavior.  Return a probability
  // of the branch being taken and set the "cnt" field.  Returns a -1.0
  // if we need to use static prediction for some reason.
! float Parse::dynamic_branch_prediction(float &cnt) {
    ResourceMark rm;
    cnt  = COUNT_UNKNOWN;

    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
!   int     taken = data->as_JumpData()->taken();
!   int not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);

    // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
    // We also check that individual counters are positive first, overwise the sum can become positive.
    if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
      if (C->log() != NULL) {
--- 766,814 ----
  //--------------------------dynamic_branch_prediction--------------------------
  // Try to gather dynamic branch prediction behavior.  Return a probability
  // of the branch being taken and set the "cnt" field.  Returns a -1.0
  // if we need to use static prediction for some reason.
! float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
    ResourceMark rm;
    cnt  = COUNT_UNKNOWN;
+   int     taken = 0;
+   int not_taken = 0;
+
+   if (method()->ignore_profile()) {
+     if (btest == BoolTest::eq && test->is_Cmp() && test->in(1)->Opcode() == Op_Opaque4) {
+       Opaque4Node* opq = (Opaque4Node*)test->in(1);
+       taken = opq->taken();
+       not_taken = opq->not_taken();
+       opq->consume();
+     } else {
+       // No profile info. Be conservative.
+       int cnt = method()->interpreter_invocation_count();
+       taken = cnt / 2;
+       not_taken = cnt - taken;
+     }
+   } else {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
!   taken = data->as_JumpData()->taken();
!   not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
+   }

    // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
    // We also check that individual counters are positive first, overwise the sum can become positive.
    if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
      if (C->log() != NULL) {
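Note on the hunk above: when a method is marked to ignore its own MethodData profile (the shared GWT LambdaForm case this change targets), the new branch reads injected taken/not-taken counts off an Opaque4 node hanging under the compare, and otherwise falls back to an even split of the interpreter invocation count. The following standalone C++ sketch only mirrors the control flow visible in the hunk; InjectedCounts and pick_counts are hypothetical stand-ins, not HotSpot types.

    #include <cstdio>
    #include <utility>

    // Hypothetical stand-in for the counts an Opaque4-style node carries.
    // The taken()/not_taken()/consume() contract mirrors the calls in the hunk above.
    struct InjectedCounts {
      int taken_cnt;
      int not_taken_cnt;
      bool consumed = false;
      int taken()     const { return taken_cnt; }
      int not_taken() const { return not_taken_cnt; }
      void consume()        { consumed = true; }   // mark the counts as used once
    };

    // Prefer injected counts; otherwise split the interpreter invocation count
    // evenly (the conservative fallback in the patch).
    std::pair<int, int> pick_counts(InjectedCounts* injected, int interpreter_invocations) {
      if (injected != nullptr) {
        injected->consume();
        return { injected->taken(), injected->not_taken() };
      }
      int taken = interpreter_invocations / 2;
      return { taken, interpreter_invocations - taken };
    }

    int main() {
      InjectedCounts counts{ 1000, 0 };                 // a branch observed as always taken
      auto with_profile    = pick_counts(&counts, 0);
      auto without_profile = pick_counts(nullptr, 500); // no injected counts: 250/250 split
      std::printf("injected: %d/%d  fallback: %d/%d\n",
                  with_profile.first, with_profile.second,
                  without_profile.first, without_profile.second);
      return 0;
    }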
*** 839,850 ****
  }

  //-----------------------------branch_prediction-------------------------------
  float Parse::branch_prediction(float& cnt, BoolTest::mask btest,
!                                int target_bci) {
!   float prob = dynamic_branch_prediction(cnt);
    // If prob is unknown, switch to static prediction
    if (prob != PROB_UNKNOWN)  return prob;

    prob = PROB_FAIR;                   // Set default value
    if (btest == BoolTest::eq)          // Exactly equal test?
--- 857,869 ----
  }

  //-----------------------------branch_prediction-------------------------------
  float Parse::branch_prediction(float& cnt, BoolTest::mask btest,
!                                int target_bci,
!                                Node* test) {
!   float prob = dynamic_branch_prediction(cnt, btest, test);
    // If prob is unknown, switch to static prediction
    if (prob != PROB_UNKNOWN)  return prob;

    prob = PROB_FAIR;                   // Set default value
    if (btest == BoolTest::eq)          // Exactly equal test?
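The behavioral point in this hunk is the ordering: dynamic profile counts are consulted first, and only if they yield PROB_UNKNOWN does the parser fall back to static heuristics such as treating an equality test as rarely taken. A rough standalone sketch of that fallback order, using made-up probability constants rather than HotSpot's PROB_* values:

    #include <cstdio>

    // Made-up constants standing in for HotSpot's PROB_UNKNOWN / PROB_FAIR etc.
    const float kProbUnknown          = -1.0f;
    const float kProbFair             =  0.5f;
    const float kProbStaticInfrequent =  0.25f;  // heuristic guess for "eq" tests

    enum class TestKind { kEq, kOther };

    // Hypothetical dynamic predictor: returns a probability from counts, or
    // kProbUnknown when the counts are too small to be meaningful (cf. the
    // "taken + not_taken < 40" guard in the real code).
    float dynamic_prediction(int taken, int not_taken) {
      if (taken < 0 || not_taken < 0 || taken + not_taken < 40) return kProbUnknown;
      return (float)taken / (float)(taken + not_taken);
    }

    // Sketch of the fallback order shown in the hunk: dynamic first, then static.
    float predict(int taken, int not_taken, TestKind kind) {
      float prob = dynamic_prediction(taken, not_taken);
      if (prob != kProbUnknown) return prob;        // profile counts win
      return (kind == TestKind::kEq) ? kProbStaticInfrequent : kProbFair;
    }

    int main() {
      std::printf("profiled: %.2f  unprofiled eq: %.2f\n",
                  predict(90, 10, TestKind::kOther),
                  predict(0, 0, TestKind::kEq));
      return 0;
    }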
*** 930,940 ****

    Block* branch_block = successor_for_bci(target_bci);
    Block* next_block   = successor_for_bci(iter().next_bci());

    float cnt;
!   float prob = branch_prediction(cnt, btest, target_bci);
    if (prob == PROB_UNKNOWN) {
      // (An earlier version of do_ifnull omitted this trap for OSR methods.)
  #ifndef PRODUCT
      if (PrintOpto && Verbose)
        tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
--- 949,959 ----

    Block* branch_block = successor_for_bci(target_bci);
    Block* next_block   = successor_for_bci(iter().next_bci());

    float cnt;
!   float prob = branch_prediction(cnt, btest, target_bci, c);
    if (prob == PROB_UNKNOWN) {
      // (An earlier version of do_ifnull omitted this trap for OSR methods.)
  #ifndef PRODUCT
      if (PrintOpto && Verbose)
        tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
*** 1011,1021 ****

    Block* branch_block = successor_for_bci(target_bci);
    Block* next_block   = successor_for_bci(iter().next_bci());

    float cnt;
!   float prob = branch_prediction(cnt, btest, target_bci);
    float untaken_prob = 1.0 - prob;

    if (prob == PROB_UNKNOWN) {
  #ifndef PRODUCT
      if (PrintOpto && Verbose)
--- 1030,1040 ----

    Block* branch_block = successor_for_bci(target_bci);
    Block* next_block   = successor_for_bci(iter().next_bci());

    float cnt;
!   float prob = branch_prediction(cnt, btest, target_bci, c);
    float untaken_prob = 1.0 - prob;

    if (prob == PROB_UNKNOWN) {
  #ifndef PRODUCT
      if (PrintOpto && Verbose)
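Both call sites (do_ifnull above and do_if here) now forward the compare node c, so injected counts reach dynamic_branch_prediction. The payoff named in the bug title is that a probability pinned at "never taken" lets the parser skip the dead arm instead of compiling it. The sketch below only illustrates that idea; is_never_taken and compile_branch are hypothetical names, not the do_if code itself.

    #include <cstdio>

    // Hypothetical threshold: probabilities at or below this count as "never taken".
    const float kNeverTaken = 1e-6f;

    bool is_never_taken(float prob) { return prob <= kNeverTaken; }

    // Illustration only: a parser that trusts the prediction can replace the cold
    // arm of a branch with a deoptimization point instead of generating code for it.
    void compile_branch(float prob_taken) {
      if (is_never_taken(prob_taken)) {
        std::puts("prune taken arm: emit a trap that re-enters the interpreter if ever hit");
      } else if (is_never_taken(1.0f - prob_taken)) {
        std::puts("prune untaken arm: emit a trap for the fall-through side");
      } else {
        std::puts("compile both arms");
      }
    }

    int main() {
      compile_branch(0.0f);   // injected counts said: never taken
      compile_branch(0.5f);   // conservative fallback: keep both arms
      return 0;
    }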