 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _explicit_gc_waiters_lock(Mutex::leaf, "ShenandoahExplicitGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _explicit_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0)
{
  create_and_start();
  _periodic_task.enroll();
}

// ... (beginning of run_service() elided) ...
      policy->record_alloc_failure_to_full();
      mode = stw_full;
    }

    } else if (explicit_gc_requested) {
      // Honor explicit GC requests
      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        if (policy->can_do_traversal_gc()) {
          mode = concurrent_traversal;
        } else {
          mode = concurrent_normal;
        }
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
      cause = _explicit_gc_cause;
    } else {
      // Potential normal cycle: ask the heuristics if they want to act
      if (policy->should_start_partial_gc()) {
        mode = concurrent_partial;
        cause = GCCause::_shenandoah_partial_gc;
      } else if (policy->should_start_traversal_gc()) {
        mode = concurrent_traversal;
        cause = GCCause::_shenandoah_traversal_gc;
      } else if (policy->should_start_normal_gc()) {
        mode = concurrent_normal;
        cause = GCCause::_shenandoah_concurrent_gc;
      }

      // Ask the policy whether this cycle should process references or unload classes
      heap->set_process_references(policy->should_process_references());
      heap->set_unload_classes(policy->should_unload_classes());
    }
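    // Note the priority order above: a partial cycle is preferred over a traversal cycle,
    // which is preferred over a normal concurrent cycle; if none of the heuristics fire,
    // mode stays none and the control loop just sleeps.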

    bool gc_requested = (mode != none);
    assert(!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If a GC was requested, sample the counters even without actual triggers
      // from the allocation machinery. This captures the GC phases more accurately.
      set_forced_counters_update(true);

      // If a GC was requested, also dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status_verbose();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_partial:
        service_concurrent_partial_cycle(cause);
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    if (gc_requested) {
      heap->set_used_at_last_gc();

      // If this was the explicit GC cycle, notify waiters about it
      if (explicit_gc_requested) {
        notify_explicit_gc_waiters();

        // Explicit GC tries to uncommit everything
        heap->handle_heap_shrinkage(os::elapsedTime());
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }
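      // (The waiters are mutator threads that failed an allocation and parked on
      // _alloc_failure_waiters_lock until this cycle makes memory available again.)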

      // Report the current free set state at the end of the cycle, whether
      // it completed normally or was aborted.
      {
        ShenandoahHeapLocker locker(heap->lock());
        // ... (free set logging and the rest of the end-of-cycle bookkeeping elided) ...
    }

    // Wait before performing the next action. If an allocation happened during this wait,
    // we exit sooner, to let the heuristics re-evaluate the new conditions. If we are idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
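    // For example, with ShenandoahControlIntervalMin=1 and ShenandoahControlIntervalMax=10
    // (assumed defaults, in ms), an idle control thread sleeps 1, 2, 4, 8, then 10 ms per
    // iteration, doubling at most once per ShenandoahControlIntervalAdjustPeriod.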
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(); we cannot leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahConcurrentThread::service_concurrent_partial_cycle(GCCause::Cause cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahPartialGC* partial_gc = heap->partial_gc();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  TraceCollectorStats tcs(heap->monitoring_support()->partial_collection_counters());

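  // Phase structure: the vmop_entry_*() steps run in VM operations at safepoints, while the
  // entry_*() steps run concurrently with mutators. After each step we re-check for
  // cancellation and bail out to the matching degeneration point if needed.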
  heap->vmop_entry_init_partial();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return;

  if (!partial_gc->has_work()) return;

  heap->entry_partial();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return;

  heap->vmop_entry_final_partial();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return;

  heap->entry_cleanup();

  heap->shenandoahPolicy()->record_success_partial();
}

void ShenandoahConcurrentThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup_bitmaps();

  heap->shenandoahPolicy()->record_success_concurrent();
}

void ShenandoahConcurrentThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, it first degrades to Degenerated GC and
  // completes the GC there. If a second allocation failure happens during the Degenerated GC
  // cycle (for example, when GC tries to evacuate something and no memory is available),
  // the cycle degrades to Full GC.
  //
  // The only current exception is an allocation failure during concurrent evacuation: it goes
  // straight to Full GC, because we do not recover well from an incompletely evacuated heap
  // in an STW cycle.
  //
  // There are also two shortcuts through the normal cycle: a) the immediate garbage shortcut,
  // when the heuristics say there are no regions to compact and all the collection comes from
  // immediately reclaimable regions; b) the coalesced update-references (UR) shortcut, when
  // the heuristics decide to coalesce UR with the mark phase of the next cycle.
  //
  // (ASCII diagram of the cycle, its shortcuts, and degradation points elided)
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _explicit_gc_waiters_lock(Mutex::leaf, "ShenandoahExplicitGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _explicit_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0)
{
  create_and_start();
  _periodic_task.enroll();
}

// ... (beginning of run_service() elided) ...
      policy->record_alloc_failure_to_full();
      mode = stw_full;
    }

    } else if (explicit_gc_requested) {
      // Honor explicit GC requests
      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        if (policy->can_do_traversal_gc()) {
          mode = concurrent_traversal;
        } else {
          mode = concurrent_normal;
        }
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
      cause = _explicit_gc_cause;
    } else {
      // Potential normal cycle: ask the heuristics if they want to act
      ShenandoahHeap::GCCycleMode traversal_mode = policy->should_start_traversal_gc();
      if (traversal_mode != ShenandoahHeap::NONE) {
        mode = concurrent_traversal;
        cause = GCCause::_shenandoah_traversal_gc;
        heap->set_cycle_mode(traversal_mode);
      } else if (policy->should_start_normal_gc()) {
        mode = concurrent_normal;
        cause = GCCause::_shenandoah_concurrent_gc;
        heap->set_cycle_mode(ShenandoahHeap::MAJOR);
      }

      // Ask the policy whether this cycle should process references or unload classes
      heap->set_process_references(policy->should_process_references());
      heap->set_unload_classes(policy->should_unload_classes());
    }
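    // Note: should_start_traversal_gc() returns a GCCycleMode here rather than a bool, so
    // the heuristics can request either a MINOR or a MAJOR traversal; a normal concurrent
    // cycle is always MAJOR. The cycle mode is reset to NONE after the cycle completes below.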

    bool gc_requested = (mode != none);
    assert(!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If a GC was requested, sample the counters even without actual triggers
      // from the allocation machinery. This captures the GC phases more accurately.
      set_forced_counters_update(true);

      // If a GC was requested, also dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status_verbose();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    heap->set_cycle_mode(ShenandoahHeap::NONE);

    if (gc_requested) {
      heap->set_used_at_last_gc();

      // If this was the explicit GC cycle, notify waiters about it
      if (explicit_gc_requested) {
        notify_explicit_gc_waiters();

        // Explicit GC tries to uncommit everything
        heap->handle_heap_shrinkage(os::elapsedTime());
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it completed normally or was aborted.
      {
        ShenandoahHeapLocker locker(heap->lock());
        // ... (free set logging and the rest of the end-of-cycle bookkeeping elided) ...
    }

    // Wait before performing the next action. If an allocation happened during this wait,
    // we exit sooner, to let the heuristics re-evaluate the new conditions. If we are idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(); we cannot leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahConcurrentThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  bool is_minor = heap->is_minor_gc();
  TraceCollectorStats tcs(is_minor ? heap->monitoring_support()->partial_collection_counters()
                                   : heap->monitoring_support()->concurrent_collection_counters());

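  // A minor traversal reports into the partial collection counters and a major one into the
  // concurrent collection counters, so the two cycle kinds stay distinguishable in monitoring.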
  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup_traversal();

  heap->shenandoahPolicy()->record_success_concurrent();
}

void ShenandoahConcurrentThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, it first degrades to Degenerated GC and
  // completes the GC there. If a second allocation failure happens during the Degenerated GC
  // cycle (for example, when GC tries to evacuate something and no memory is available),
  // the cycle degrades to Full GC.
  //
  // The only current exception is an allocation failure during concurrent evacuation: it goes
  // straight to Full GC, because we do not recover well from an incompletely evacuated heap
  // in an STW cycle.
  //
  // There are also two shortcuts through the normal cycle: a) the immediate garbage shortcut,
  // when the heuristics say there are no regions to compact and all the collection comes from
  // immediately reclaimable regions; b) the coalesced update-references (UR) shortcut, when
  // the heuristics decide to coalesce UR with the mark phase of the next cycle.
  //
  // (ASCII diagram of the cycle, its shortcuts, and degradation points elided)