< prev index next >
src/hotspot/share/opto/compile.cpp
Print this page
rev 52710 : Upstream/backport Shenandoah to JDK11u
@@ -80,10 +80,13 @@
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
@@ -2102,11 +2105,11 @@
if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
// PhaseIdealLoop is expensive so we only try it once we are
// out of live nodes and we only try it again if the previous
// helped got the number of nodes down significantly
- PhaseIdealLoop ideal_loop( igvn, false, true );
+ PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
if (failing()) return;
low_live_nodes = live_nodes();
_major_progress = true;
}
@@ -2153,10 +2156,25 @@
set_inlining_incrementally(false);
}
+// Run PhaseIdealLoop repeatedly, at most loop_opts_cnt times, while the
+// previous pass reports major progress; each pass decrements loop_opts_cnt
+// (passed by reference so the caller's remaining-budget counter is updated).
+// Returns false when the compile has failed and the caller must bail out,
+// true otherwise.
+bool Compile::optimize_loops(int& loop_opts_cnt, PhaseIterGVN& igvn, LoopOptsMode mode) {
+  if(loop_opts_cnt > 0) {
+    debug_only( int cnt = 0; );
+    while(major_progress() && (loop_opts_cnt > 0)) {
+      TracePhase tp("idealLoop", &timers[_t_idealLoop]);
+      // Debug-only guard against a loop-opts cycle that never converges.
+      assert( cnt++ < 40, "infinite cycle in loop optimization" );
+      PhaseIdealLoop ideal_loop(igvn, mode);
+      loop_opts_cnt--;
+      if (failing()) return false;
+      // Record each productive iteration for -XX:+PrintMethod tracing (level 2).
+      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
+    }
+  }
+  return true;
+}
+
//------------------------------Optimize---------------------------------------
// Given a graph, optimize it.
void Compile::Optimize() {
TracePhase tp("optimizer", &timers[_t_optimizer]);
@@ -2191,14 +2209,14 @@
{
TracePhase tp("iterGVN", &timers[_t_iterGVN]);
igvn.optimize();
}
- print_method(PHASE_ITER_GVN1, 2);
-
if (failing()) return;
+ print_method(PHASE_ITER_GVN1, 2);
+
inline_incrementally(igvn);
print_method(PHASE_INCREMENTAL_INLINE, 2);
if (failing()) return;
@@ -2243,11 +2261,11 @@
// Perform escape analysis
if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
if (has_loops()) {
// Cleanup graph (remove dead nodes).
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- PhaseIdealLoop ideal_loop( igvn, false, true );
+ PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
if (failing()) return;
}
ConnectionGraph::do_analysis(this, &igvn);
@@ -2278,27 +2296,27 @@
// Set loop opts counter
loop_opts_cnt = num_loop_opts();
if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
{
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- PhaseIdealLoop ideal_loop( igvn, true );
+ PhaseIdealLoop ideal_loop(igvn, LoopOptsDefault);
loop_opts_cnt--;
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
if (failing()) return;
}
// Loop opts pass if partial peeling occurred in previous pass
if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- PhaseIdealLoop ideal_loop( igvn, false );
+ PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
loop_opts_cnt--;
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
if (failing()) return;
}
// Loop opts pass for loop-unrolling before CCP
if(major_progress() && (loop_opts_cnt > 0)) {
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- PhaseIdealLoop ideal_loop( igvn, false );
+ PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
loop_opts_cnt--;
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
}
if (!failing()) {
// Verify that last round of loop opts produced a valid graph
@@ -2330,20 +2348,12 @@
if (failing()) return;
// Loop transforms on the ideal graph. Range Check Elimination,
// peeling, unrolling, etc.
- if(loop_opts_cnt > 0) {
- debug_only( int cnt = 0; );
- while(major_progress() && (loop_opts_cnt > 0)) {
- TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- assert( cnt++ < 40, "infinite cycle in loop optimization" );
- PhaseIdealLoop ideal_loop( igvn, true);
- loop_opts_cnt--;
- if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
- if (failing()) return;
- }
+ if (!optimize_loops(loop_opts_cnt, igvn, LoopOptsDefault)) {
+ return;
}
#if INCLUDE_ZGC
if (UseZGC) {
ZBarrierSetC2::find_dominating_barriers(igvn);
@@ -2381,10 +2391,19 @@
assert(failing(), "must bail out w/ explicit message");
return;
}
}
+ print_method(PHASE_BEFORE_BARRIER_EXPAND, 2);
+
+#if INCLUDE_SHENANDOAHGC
+ if (UseShenandoahGC && ((ShenandoahBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2())->expand_barriers(this, igvn)) {
+ assert(failing(), "must bail out w/ explicit message");
+ return;
+ }
+#endif
+
if (opaque4_count() > 0) {
C->remove_opaque4_nodes(igvn);
igvn.optimize();
}
@@ -2821,10 +2840,21 @@
case Op_CallRuntime:
case Op_CallLeaf:
case Op_CallLeafNoFP: {
assert (n->is_Call(), "");
CallNode *call = n->as_Call();
+#if INCLUDE_SHENANDOAHGC
+ if (UseShenandoahGC && ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
+ uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
+ if (call->req() > cnt) {
+ assert(call->req() == cnt+1, "only one extra input");
+ Node* addp = call->in(cnt);
+ assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
+ call->del_req(cnt);
+ }
+ }
+#endif
// Count call sites where the FP mode bit would have to be flipped.
// Do not count uncommon runtime calls:
// uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
// _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
@@ -3385,10 +3415,32 @@
}
}
}
break;
}
+#if INCLUDE_SHENANDOAHGC
+ case Op_ShenandoahCompareAndSwapP:
+ case Op_ShenandoahCompareAndSwapN:
+ case Op_ShenandoahWeakCompareAndSwapN:
+ case Op_ShenandoahWeakCompareAndSwapP:
+ case Op_ShenandoahCompareAndExchangeP:
+ case Op_ShenandoahCompareAndExchangeN:
+#ifdef ASSERT
+ if( VerifyOptoOopOffsets ) {
+ MemNode* mem = n->as_Mem();
+ // Check to see if address types have grounded out somehow.
+ const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
+ ciInstanceKlass *k = tp->klass()->as_instance_klass();
+ bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
+ assert( !tp || oop_offset_is_sane, "" );
+ }
+#endif
+ break;
+ case Op_ShenandoahLoadReferenceBarrier:
+ assert(false, "should have been expanded already");
+ break;
+#endif
case Op_RangeCheck: {
RangeCheckNode* rc = n->as_RangeCheck();
Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
n->subsume_by(iff, this);
frc._tests.push(iff);
@@ -3821,14 +3873,22 @@
// Verify GC barriers consistency
// Currently supported:
// - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
void Compile::verify_barriers() {
-#if INCLUDE_G1GC
- if (UseG1GC) {
+#if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
+ if (UseG1GC || UseShenandoahGC) {
// Verify G1 pre-barriers
+
+#if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
+ const int marking_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
+ : ShenandoahThreadLocalData::satb_mark_queue_active_offset());
+#elif INCLUDE_G1GC
const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+#else
+ const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
+#endif
ResourceArea *area = Thread::current()->resource_area();
Unique_Node_List visited(area);
Node_List worklist(area);
// We're going to walk control flow backwards starting from the Root
< prev index next >