
src/hotspot/share/opto/compile.cpp

rev 52800 : 8209686: cleanup arguments to PhaseIdealLoop() constructor
Reviewed-by: thartmann, kvn, pliden
rev 52801 : Upstream/backport Shenandoah to JDK11u
* * *
[backport] 8237570: Shenandoah: cleanup uses of allocation/free threshold in static heuristics
Reviewed-by: rkennke

src/hotspot/share/opto/compile.cpp (old version)

  65 #include "opto/phaseX.hpp"
  66 #include "opto/rootnode.hpp"
  67 #include "opto/runtime.hpp"
  68 #include "opto/stringopts.hpp"
  69 #include "opto/type.hpp"
  70 #include "opto/vectornode.hpp"
  71 #include "runtime/arguments.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stubRoutines.hpp"
  75 #include "runtime/timer.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/copy.hpp"
  78 #include "utilities/macros.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1ThreadLocalData.hpp"
  81 #endif // INCLUDE_G1GC
  82 #if INCLUDE_ZGC
  83 #include "gc/z/c2/zBarrierSetC2.hpp"
  84 #endif
  85 
  86 
  87 // -------------------- Compile::mach_constant_base_node -----------------------
  88 // Constant table base node singleton.
  89 MachConstantBaseNode* Compile::mach_constant_base_node() {
  90   if (_mach_constant_base_node == NULL) {
  91     _mach_constant_base_node = new MachConstantBaseNode();
  92     _mach_constant_base_node->add_req(C->root());
  93   }
  94   return _mach_constant_base_node;
  95 }
  96 
  97 
  98 /// Support for intrinsics.
  99 
 100 // Return the index at which m must be inserted (or already exists).
  101 // The sort order is by the address of the ciMethod, with is_virtual as the minor key.
 102 class IntrinsicDescPair {
 103  private:
 104   ciMethod* _m;


2373     // No more loop optimizations. Remove all range check dependent CastIINodes.
2374     C->remove_range_check_casts(igvn);
2375     igvn.optimize();
2376   }
2377 
2378 #ifdef ASSERT
2379   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2380   bs->verify_gc_barriers(false);
2381 #endif
2382 
2383   {
2384     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2385     PhaseMacroExpand  mex(igvn);
2386     print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
2387     if (mex.expand_macro_nodes()) {
2388       assert(failing(), "must bail out w/ explicit message");
2389       return;
2390     }
2391   }
2392 
2393   if (opaque4_count() > 0) {
2394     C->remove_opaque4_nodes(igvn);
2395     igvn.optimize();
2396   }
2397 
2398   DEBUG_ONLY( _modified_nodes = NULL; )
2399  } // (End scope of igvn; run destructor if necessary for asserts.)
2400 
2401  process_print_inlining();
2402  // A method with only infinite loops has no edges entering loops from root
2403  {
2404    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2405    if (final_graph_reshaping()) {
2406      assert(failing(), "must bail out w/ explicit message");
2407      return;
2408    }
2409  }
2410 
2411  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2412 }


2813   // case Op_ConvD2L: // handled by leaf call
2814   case Op_ConD:
2815   case Op_CmpD:
2816   case Op_CmpD3:
2817     frc.inc_double_count();
2818     break;
2819   case Op_Opaque1:              // Remove Opaque Nodes before matching
2820   case Op_Opaque2:              // Remove Opaque Nodes before matching
2821   case Op_Opaque3:
2822     n->subsume_by(n->in(1), this);
2823     break;
2824   case Op_CallStaticJava:
2825   case Op_CallJava:
2826   case Op_CallDynamicJava:
 2827     frc.inc_java_call_count(); // Count the Java call site, then fall through
2828   case Op_CallRuntime:
2829   case Op_CallLeaf:
2830   case Op_CallLeafNoFP: {
 2831     assert(n->is_Call(), "all call opcodes must map to Call nodes");
2832     CallNode *call = n->as_Call();
2833     // Count call sites where the FP mode bit would have to be flipped.
2834     // Do not count uncommon runtime calls:
2835     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2836     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2837     if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
2838       frc.inc_call_count();   // Count the call site
2839     } else {                  // See if uncommon argument is shared
2840       Node *n = call->in(TypeFunc::Parms);
2841       int nop = n->Opcode();
2842       // Clone shared simple arguments to uncommon calls, item (1).
2843       if (n->outcnt() > 1 &&
2844           !n->is_Proj() &&
2845           nop != Op_CreateEx &&
2846           nop != Op_CheckCastPP &&
2847           nop != Op_DecodeN &&
2848           nop != Op_DecodeNKlass &&
2849           !n->is_Mem() &&
2850           !n->is_Phi()) {
2851         Node *x = n->clone();
2852         call->set_req(TypeFunc::Parms, x);


3377       // register allocation can be confused.
3378       ResourceMark rm;
3379       Unique_Node_List wq;
3380       wq.push(n->in(MemBarNode::Precedent));
3381       n->set_req(MemBarNode::Precedent, top());
3382       while (wq.size() > 0) {
3383         Node* m = wq.pop();
3384         if (m->outcnt() == 0) {
3385           for (uint j = 0; j < m->req(); j++) {
3386             Node* in = m->in(j);
3387             if (in != NULL) {
3388               wq.push(in);
3389             }
3390           }
3391           m->disconnect_inputs(NULL, this);
3392         }
3393       }
3394     }
3395     break;
3396   }
3397   case Op_RangeCheck: {
3398     RangeCheckNode* rc = n->as_RangeCheck();
3399     Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
3400     n->subsume_by(iff, this);
3401     frc._tests.push(iff);
3402     break;
3403   }
3404   case Op_ConvI2L: {
3405     if (!Matcher::convi2l_type_required) {
3406       // Code generation on some platforms doesn't need accurate
3407       // ConvI2L types. Widening the type can help remove redundant
3408       // address computations.
3409       n->as_Type()->set_type(TypeLong::INT);
3410       ResourceMark rm;
3411       Unique_Node_List wq;
3412       wq.push(n);
3413       for (uint next = 0; next < wq.size(); next++) {
3414         Node *m = wq.at(next);
3415 
3416         for(;;) {


3813           if (use->is_Con())        continue;  // a dead ConNode is OK
3814           // At this point, we have found a dead node which is DU-reachable.
3815           if (!dead_nodes) {
3816             tty->print_cr("*** Dead nodes reachable via DU edges:");
3817             dead_nodes = true;
3818           }
3819           use->dump(2);
3820           tty->print_cr("---");
3821           checked.push(use);  // No repeats; pretend it is now checked.
3822         }
3823       }
3824       assert(!dead_nodes, "using nodes must be reachable from root");
3825     }
3826   }
3827 }
3828 
3829 // Verify GC barriers consistency
3830 // Currently supported:
3831 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
3832 void Compile::verify_barriers() {
3833 #if INCLUDE_G1GC
3834   if (UseG1GC) {
3835     // Verify G1 pre-barriers
3836     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
3837 
3838     ResourceArea *area = Thread::current()->resource_area();
3839     Unique_Node_List visited(area);
3840     Node_List worklist(area);
3841     // We're going to walk control flow backwards starting from the Root
3842     worklist.push(_root);
3843     while (worklist.size() > 0) {
3844       Node* x = worklist.pop();
3845       if (x == NULL || x == top()) continue;
3846       if (visited.member(x)) {
3847         continue;
3848       } else {
3849         visited.push(x);
3850       }
3851 
3852       if (x->is_Region()) {
3853         for (uint i = 1; i < x->req(); i++) {
3854           worklist.push(x->in(i));
3855         }
3856       } else {


src/hotspot/share/opto/compile.cpp (new version, with Shenandoah changes)

  65 #include "opto/phaseX.hpp"
  66 #include "opto/rootnode.hpp"
  67 #include "opto/runtime.hpp"
  68 #include "opto/stringopts.hpp"
  69 #include "opto/type.hpp"
  70 #include "opto/vectornode.hpp"
  71 #include "runtime/arguments.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stubRoutines.hpp"
  75 #include "runtime/timer.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/copy.hpp"
  78 #include "utilities/macros.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1ThreadLocalData.hpp"
  81 #endif // INCLUDE_G1GC
  82 #if INCLUDE_ZGC
  83 #include "gc/z/c2/zBarrierSetC2.hpp"
  84 #endif
  85 #if INCLUDE_SHENANDOAHGC
  86 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  87 #endif
  88 
  89 
  90 // -------------------- Compile::mach_constant_base_node -----------------------
  91 // Constant table base node singleton.
  92 MachConstantBaseNode* Compile::mach_constant_base_node() {
  93   if (_mach_constant_base_node == NULL) {
  94     _mach_constant_base_node = new MachConstantBaseNode();
  95     _mach_constant_base_node->add_req(C->root());
  96   }
  97   return _mach_constant_base_node;
  98 }
  99 
 100 
 101 /// Support for intrinsics.
 102 
 103 // Return the index at which m must be inserted (or already exists).
  104 // The sort order is by the address of the ciMethod, with is_virtual as the minor key.
 105 class IntrinsicDescPair {
 106  private:
 107   ciMethod* _m;


2376     // No more loop optimizations. Remove all range check dependent CastIINodes.
2377     C->remove_range_check_casts(igvn);
2378     igvn.optimize();
2379   }
2380 
2381 #ifdef ASSERT
2382   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2383   bs->verify_gc_barriers(false);
2384 #endif
2385 
2386   {
2387     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2388     PhaseMacroExpand  mex(igvn);
2389     print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
2390     if (mex.expand_macro_nodes()) {
2391       assert(failing(), "must bail out w/ explicit message");
2392       return;
2393     }
2394   }
2395 
2396   print_method(PHASE_BEFORE_BARRIER_EXPAND, 2);
2397 
2398 #if INCLUDE_SHENANDOAHGC
2399   if (UseShenandoahGC && ((ShenandoahBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2())->expand_barriers(this, igvn)) {
2400     assert(failing(), "must bail out w/ explicit message");
2401     return;
2402   }
2403 #endif
2404 
2405   if (opaque4_count() > 0) {
2406     C->remove_opaque4_nodes(igvn);
2407     igvn.optimize();
2408   }
2409 
2410   DEBUG_ONLY( _modified_nodes = NULL; )
2411  } // (End scope of igvn; run destructor if necessary for asserts.)
2412 
2413  process_print_inlining();
2414  // A method with only infinite loops has no edges entering loops from root
2415  {
2416    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2417    if (final_graph_reshaping()) {
2418      assert(failing(), "must bail out w/ explicit message");
2419      return;
2420    }
2421  }
2422 
2423  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2424 }


2825   // case Op_ConvD2L: // handled by leaf call
2826   case Op_ConD:
2827   case Op_CmpD:
2828   case Op_CmpD3:
2829     frc.inc_double_count();
2830     break;
2831   case Op_Opaque1:              // Remove Opaque Nodes before matching
2832   case Op_Opaque2:              // Remove Opaque Nodes before matching
2833   case Op_Opaque3:
2834     n->subsume_by(n->in(1), this);
2835     break;
2836   case Op_CallStaticJava:
2837   case Op_CallJava:
2838   case Op_CallDynamicJava:
 2839     frc.inc_java_call_count(); // Count the Java call site, then fall through
2840   case Op_CallRuntime:
2841   case Op_CallLeaf:
2842   case Op_CallLeafNoFP: {
 2843     assert(n->is_Call(), "all call opcodes must map to Call nodes");
2844     CallNode *call = n->as_Call();
2845 #if INCLUDE_SHENANDOAHGC
2846     if (UseShenandoahGC && ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
2847       uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
2848       if (call->req() > cnt) {
2849         assert(call->req() == cnt+1, "only one extra input");
2850         Node* addp = call->in(cnt);
2851         assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
2852         call->del_req(cnt);
2853       }
2854     }
2855 #endif
2856     // Count call sites where the FP mode bit would have to be flipped.
2857     // Do not count uncommon runtime calls:
2858     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2859     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2860     if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
2861       frc.inc_call_count();   // Count the call site
2862     } else {                  // See if uncommon argument is shared
2863       Node *n = call->in(TypeFunc::Parms);
2864       int nop = n->Opcode();
2865       // Clone shared simple arguments to uncommon calls, item (1).
2866       if (n->outcnt() > 1 &&
2867           !n->is_Proj() &&
2868           nop != Op_CreateEx &&
2869           nop != Op_CheckCastPP &&
2870           nop != Op_DecodeN &&
2871           nop != Op_DecodeNKlass &&
2872           !n->is_Mem() &&
2873           !n->is_Phi()) {
2874         Node *x = n->clone();
2875         call->set_req(TypeFunc::Parms, x);


3400       // register allocation can be confused.
3401       ResourceMark rm;
3402       Unique_Node_List wq;
3403       wq.push(n->in(MemBarNode::Precedent));
3404       n->set_req(MemBarNode::Precedent, top());
3405       while (wq.size() > 0) {
3406         Node* m = wq.pop();
3407         if (m->outcnt() == 0) {
3408           for (uint j = 0; j < m->req(); j++) {
3409             Node* in = m->in(j);
3410             if (in != NULL) {
3411               wq.push(in);
3412             }
3413           }
3414           m->disconnect_inputs(NULL, this);
3415         }
3416       }
3417     }
3418     break;
3419   }
3420 #if INCLUDE_SHENANDOAHGC
3421   case Op_ShenandoahCompareAndSwapP:
3422   case Op_ShenandoahCompareAndSwapN:
3423   case Op_ShenandoahWeakCompareAndSwapN:
3424   case Op_ShenandoahWeakCompareAndSwapP:
3425   case Op_ShenandoahCompareAndExchangeP:
3426   case Op_ShenandoahCompareAndExchangeN:
3427 #ifdef ASSERT
3428     if( VerifyOptoOopOffsets ) {
3429       MemNode* mem  = n->as_Mem();
3430       // Check to see if address types have grounded out somehow.
3431       const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
3432       // Guard the dereference: the old code tested !tp only after using tp.
3433       bool oop_offset_is_sane = (tp == NULL) || tp->klass()->as_instance_klass()->contains_field_offset(tp->offset());
3434       assert(oop_offset_is_sane, "oop offset must be within the instance klass");
3435     }
3436 #endif
3437     break;
3438   case Op_ShenandoahLoadReferenceBarrier:
3439     assert(false, "should have been expanded already");
3440     break;
3441 #endif
3442   case Op_RangeCheck: {
3443     RangeCheckNode* rc = n->as_RangeCheck();
3444     Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
3445     n->subsume_by(iff, this);
3446     frc._tests.push(iff);
3447     break;
3448   }
3449   case Op_ConvI2L: {
3450     if (!Matcher::convi2l_type_required) {
3451       // Code generation on some platforms doesn't need accurate
3452       // ConvI2L types. Widening the type can help remove redundant
3453       // address computations.
3454       n->as_Type()->set_type(TypeLong::INT);
3455       ResourceMark rm;
3456       Unique_Node_List wq;
3457       wq.push(n);
3458       for (uint next = 0; next < wq.size(); next++) {
3459         Node *m = wq.at(next);
3460 
3461         for(;;) {


3858           if (use->is_Con())        continue;  // a dead ConNode is OK
3859           // At this point, we have found a dead node which is DU-reachable.
3860           if (!dead_nodes) {
3861             tty->print_cr("*** Dead nodes reachable via DU edges:");
3862             dead_nodes = true;
3863           }
3864           use->dump(2);
3865           tty->print_cr("---");
3866           checked.push(use);  // No repeats; pretend it is now checked.
3867         }
3868       }
3869       assert(!dead_nodes, "using nodes must be reachable from root");
3870     }
3871   }
3872 }
3873 
3874 // Verify GC barrier consistency
3875 // Currently supported:
3876 // - G1 and Shenandoah SATB pre-barriers (see GraphKit::g1_write_barrier_pre())
3877 void Compile::verify_barriers() {
3878 #if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
3879   if (UseG1GC || UseShenandoahGC) {
3880     // Verify G1/Shenandoah SATB pre-barriers
3881 
3882 #if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
3883     const int marking_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
3884                                                 : ShenandoahThreadLocalData::satb_mark_queue_active_offset());
3885 #elif INCLUDE_G1GC
3886     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
3887 #else
3888     const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
3889 #endif
3890 
3891     ResourceArea *area = Thread::current()->resource_area();
3892     Unique_Node_List visited(area);
3893     Node_List worklist(area);
3894     // We're going to walk control flow backwards starting from the Root
3895     worklist.push(_root);
3896     while (worklist.size() > 0) {
3897       Node* x = worklist.pop();
3898       if (x == NULL || x == top()) continue;
3899       if (visited.member(x)) {
3900         continue;
3901       } else {
3902         visited.push(x);
3903       }
3904 
3905       if (x->is_Region()) {
3906         for (uint i = 1; i < x->req(); i++) {
3907           worklist.push(x->in(i));
3908         }
3909       } else {

