< prev index next >
src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.cpp
Print this page
rev 10658 : [backport] Single marking bitmap
rev 10685 : [backport] Rename ShHeap::shenandoahPolicy -> ShHeap::shenandoah_policy
rev 10690 : [backport] Cleanup header files and forward declarations
rev 10740 : [backport] Protect more internal code from false sharing
rev 10748 : [backport] Handle metadata induced GC
rev 10755 : [backport] Make heuristics tell if we can process references or unload classes
rev 10756 : [backport] Factor out implicit/explicit GC requests
rev 10772 : [backport] Update copyrights
rev 10801 : [backport] Rename vm_operations_shenandoah.* to shenandoahVMOperations.*
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
@@ -20,10 +20,11 @@
* questions.
*
*/
#include "precompiled.hpp"
+
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
@@ -31,30 +32,29 @@
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeuristics.hpp"
#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
+#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
-#include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
-#include "runtime/vmThread.hpp"
#ifdef _WINDOWS
#pragma warning(disable : 4355)
#endif
SurrogateLockerThread* ShenandoahControlThread::_slt = NULL;
ShenandoahControlThread::ShenandoahControlThread() :
ConcurrentGCThread(),
_alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true),
- _explicit_gc_waiters_lock(Mutex::leaf, "ShenandoahExplicitGC_lock", true),
+ _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true),
_periodic_task(this),
- _allocs_seen(0),
- _explicit_gc_cause(GCCause::_no_cause_specified),
- _degen_point(ShenandoahHeap::_degenerated_outside_cycle) {
+ _requested_gc_cause(GCCause::_no_cause_specified),
+ _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
+ _allocs_seen(0) {
create_and_start();
_periodic_task.enroll();
_periodic_satb_flush_task.enroll();
}
@@ -96,17 +96,18 @@
// Having a period 10x lower than the delay would mean we hit the
// shrinking with lag of less than 1/10-th of true delay.
// ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
- ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();
+ ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
ShenandoahHeuristics* heuristics = heap->heuristics();
while (!in_graceful_shutdown() && !_should_terminate) {
// Figure out if we have pending requests.
bool alloc_failure_pending = _alloc_failure_gc.is_set();
- bool explicit_gc_requested = _explicit_gc.is_set();
+ bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
+ bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);
// This control loop iteration has seen this many allocations.
intptr_t allocs_seen = (intptr_t)(Atomic::xchg_ptr(0, &_allocs_seen));
// Choose which GC mode to run in. The block below should select a single mode.
@@ -133,24 +134,43 @@
policy->record_alloc_failure_to_full();
mode = stw_full;
}
} else if (explicit_gc_requested) {
- // Honor explicit GC requests
- log_info(gc)("Trigger: Explicit GC request");
+ cause = _requested_gc_cause;
+ log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
- cause = _explicit_gc_cause;
+ heuristics->record_requested_gc();
if (ExplicitGCInvokesConcurrent) {
- heuristics->record_explicit_gc();
policy->record_explicit_to_concurrent();
mode = concurrent_normal;
+
+ // Unload and clean up everything
+ heap->set_process_references(heuristics->can_process_references());
+ heap->set_unload_classes(heuristics->can_unload_classes());
} else {
- heuristics->record_explicit_gc();
policy->record_explicit_to_full();
mode = stw_full;
}
+ } else if (implicit_gc_requested) {
+ cause = _requested_gc_cause;
+ log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
+
+ heuristics->record_requested_gc();
+
+ if (ExplicitGCInvokesConcurrent) {
+ policy->record_implicit_to_concurrent();
+ mode = concurrent_normal;
+
+ // Unload and clean up everything
+ heap->set_process_references(heuristics->can_process_references());
+ heap->set_unload_classes(heuristics->can_unload_classes());
+ } else {
+ policy->record_implicit_to_full();
+ mode = stw_full;
+ }
} else {
// Potential normal cycle: ask heuristics if it wants to act
if (heuristics->should_start_normal_gc()) {
mode = concurrent_normal;
cause = GCCause::_shenandoah_concurrent_gc;
@@ -199,13 +219,13 @@
default:
ShouldNotReachHere();
}
if (gc_requested) {
- // If this was the explicit GC cycle, notify waiters about it
- if (explicit_gc_requested) {
- notify_explicit_gc_waiters();
+ // If this was the requested GC cycle, notify waiters about it
+ if (explicit_gc_requested || implicit_gc_requested) {
+ notify_gc_waiters();
}
// If this was the allocation failure GC cycle, notify waiters about it
if (alloc_failure_pending) {
notify_alloc_failure_waiters();
@@ -229,10 +249,15 @@
set_forced_counters_update(false);
// Retract forceful part of soft refs policy
heap->collector_policy()->set_should_clear_all_soft_refs(false);
+ // Clear metaspace oom flag, if current cycle unloaded classes
+ if (heap->unload_classes()) {
+ heuristics->clear_metaspace_oom();
+ }
+
// GC is over, we are at idle now
if (ShenandoahPacing) {
heap->pacer()->setup_for_idle();
}
} else {
@@ -317,10 +342,13 @@
ShenandoahGCSession session(cause);
TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+ // Reset for upcoming marking
+ heap->entry_reset();
+
// Start initial mark under STW
heap->vmop_entry_init_mark();
// Continue concurrent mark
heap->entry_mark();
@@ -361,16 +389,16 @@
} else {
heap->vmop_entry_final_evac();
}
}
- // Reclaim space and prepare for the next normal cycle:
- heap->entry_cleanup_bitmaps();
+ // Reclaim space after cycle
+ heap->entry_cleanup();
// Cycle is complete
heap->heuristics()->record_success_concurrent();
- heap->shenandoahPolicy()->record_success_concurrent();
+ heap->shenandoah_policy()->record_success_concurrent();
}
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (heap->cancelled_gc()) {
@@ -409,22 +437,22 @@
ShenandoahGCSession session(cause);
heap->vmop_entry_full(cause);
heap->heuristics()->record_success_full();
- heap->shenandoahPolicy()->record_success_full();
+ heap->shenandoah_policy()->record_success_full();
}
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahGCSession session(cause);
heap->vmop_degenerated(point);
heap->heuristics()->record_success_degenerated();
- heap->shenandoahPolicy()->record_success_degenerated();
+ heap->shenandoah_policy()->record_success_degenerated();
}
void ShenandoahControlThread::service_uncommit(double shrink_before) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
@@ -444,24 +472,39 @@
if (has_work) {
heap->entry_uncommit(shrink_before);
}
}
-void ShenandoahControlThread::handle_explicit_gc(GCCause::Cause cause) {
+bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
+ return GCCause::is_user_requested_gc(cause) ||
+ GCCause::is_serviceability_requested_gc(cause);
+}
+
+void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
assert(GCCause::is_user_requested_gc(cause) ||
GCCause::is_serviceability_requested_gc(cause) ||
- cause == GCCause::_full_gc_alot,
+ cause == GCCause::_shenandoah_metadata_gc_clear_softrefs ||
+ cause == GCCause::_full_gc_alot ||
+ cause == GCCause::_scavenge_alot,
"only requested GCs here");
+
+ if (is_explicit_gc(cause)) {
if (!DisableExplicitGC) {
- _explicit_gc_cause = cause;
+ handle_requested_gc(cause);
+ }
+ } else {
+ handle_requested_gc(cause);
+ }
+}
- _explicit_gc.set();
- MonitorLockerEx ml(&_explicit_gc_waiters_lock);
- while (_explicit_gc.is_set()) {
+void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
+ _requested_gc_cause = cause;
+ _gc_requested.set();
+ MonitorLockerEx ml(&_gc_waiters_lock);
+ while (_gc_requested.is_set()) {
ml.wait();
}
- }
}
void ShenandoahControlThread::handle_alloc_failure(size_t words) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
@@ -508,13 +551,13 @@
bool ShenandoahControlThread::is_alloc_failure_gc() {
return _alloc_failure_gc.is_set();
}
-void ShenandoahControlThread::notify_explicit_gc_waiters() {
- _explicit_gc.unset();
- MonitorLockerEx ml(&_explicit_gc_waiters_lock);
+void ShenandoahControlThread::notify_gc_waiters() {
+ _gc_requested.unset();
+ MonitorLockerEx ml(&_gc_waiters_lock);
ml.notify_all();
}
void ShenandoahControlThread::handle_counters_update() {
if (_do_counters_update.is_set()) {
< prev index next >