Print this page
G1: Use SoftMaxHeapSize to guide GC heuristics


1192       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1193 
1194       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1195       log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1196       _g1h->workers()->run_task(&cl, num_workers);
1197 
1198       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1199                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1200     }
1201     {
1202       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1203       reclaim_empty_regions();
1204     }
1205 
1206     // Clean out dead classes
1207     if (ClassUnloadingWithConcurrentMark) {
1208       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1209       ClassLoaderDataGraph::purge();
1210     }
1211 
1212     _g1h->resize_heap_if_necessary();
1213 
1214     compute_new_sizes();
1215 
1216     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1217 
1218     assert(!restart_for_overflow(), "sanity");
1219     // Completely reset the marking state since marking completed
1220     reset_at_marking_complete();
1221   } else {
1222     // We overflowed.  Restart concurrent marking.
1223     _restart_for_overflow = true;
1224 
1225     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1226 
1227     // Clear the marking state because we will be restarting
1228     // marking due to overflowing the global mark stack.
1229     reset_marking_for_restart();
1230   }
1231 
1232   {
1233     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);


1364   double start = os::elapsedTime();
1365 
1366   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1367 
1368   {
1369     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1370     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1371     _g1h->heap_region_iterate(&cl);
1372   }
1373 
1374   if (log_is_enabled(Trace, gc, liveness)) {
1375     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1376     _g1h->heap_region_iterate(&cl);
1377   }
1378 
1379   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1380 
1381   // We need to make this be a "collection" so any collection pause that
1382   // races with it goes around and waits for Cleanup to finish.
1383   _g1h->increment_total_collections();





1384 
1385   // Local statistics
1386   double recent_cleanup_time = (os::elapsedTime() - start);
1387   _total_cleanup_time += recent_cleanup_time;
1388   _cleanup_times.add(recent_cleanup_time);
1389 
1390   {
1391     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1392     policy->record_concurrent_mark_cleanup_end();
1393   }
1394 }
1395 
1396 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1397 // Uses the G1CMTask associated with a worker thread (for serial reference
1398 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1399 // trace referent objects.
1400 //
1401 // Using the G1CMTask and embedded local queues avoids having the worker
1402 // threads operating on the global mark stack. This reduces the risk
1403 // of overflowing the stack - which we would rather avoid at this late




1192       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1193 
1194       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1195       log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1196       _g1h->workers()->run_task(&cl, num_workers);
1197 
1198       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1199                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1200     }
1201     {
1202       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1203       reclaim_empty_regions();
1204     }
1205 
1206     // Clean out dead classes
1207     if (ClassUnloadingWithConcurrentMark) {
1208       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1209       ClassLoaderDataGraph::purge();
1210     }
1211 


1212     compute_new_sizes();
1213 
1214     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1215 
1216     assert(!restart_for_overflow(), "sanity");
1217     // Completely reset the marking state since marking completed
1218     reset_at_marking_complete();
1219   } else {
1220     // We overflowed.  Restart concurrent marking.
1221     _restart_for_overflow = true;
1222 
1223     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1224 
1225     // Clear the marking state because we will be restarting
1226     // marking due to overflowing the global mark stack.
1227     reset_marking_for_restart();
1228   }
1229 
1230   {
1231     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);


1362   double start = os::elapsedTime();
1363 
1364   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1365 
1366   {
1367     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1368     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1369     _g1h->heap_region_iterate(&cl);
1370   }
1371 
1372   if (log_is_enabled(Trace, gc, liveness)) {
1373     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1374     _g1h->heap_region_iterate(&cl);
1375   }
1376 
1377   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1378 
1379   // We need to make this be a "collection" so any collection pause that
1380   // races with it goes around and waits for Cleanup to finish.
1381   _g1h->increment_total_collections();
1382 
1383   {
1384     GCTraceTime(Debug, gc, phases) debug("Expand heap after concurrent mark", _gc_timer_cm);
1385     _g1h->expand_heap_after_concurrent_mark();
1386   }
1387 
1388   // Local statistics
1389   double recent_cleanup_time = (os::elapsedTime() - start);
1390   _total_cleanup_time += recent_cleanup_time;
1391   _cleanup_times.add(recent_cleanup_time);
1392 
1393   {
1394     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1395     policy->record_concurrent_mark_cleanup_end();
1396   }
1397 }
1398 
1399 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1400 // Uses the G1CMTask associated with a worker thread (for serial reference
1401 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1402 // trace referent objects.
1403 //
1404 // Using the G1CMTask and embedded local queues avoids having the worker
1405 // threads operating on the global mark stack. This reduces the risk
1406 // of overflowing the stack - which we would rather avoid at this late