1309 }
1310 };
1311
1312 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1313 // world is stopped at this checkpoint
1314 assert(SafepointSynchronize::is_at_safepoint(),
1315 "world should be stopped");
1316
1317 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1318
1319 // If a full collection has happened, we shouldn't do this.
1320 if (has_aborted()) {
1321 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1322 return;
1323 }
1324
1325 SvcGCMarker sgcm(SvcGCMarker::OTHER);
1326
1327 if (VerifyDuringGC) {
1328 HandleMark hm; // handle scope
1329 Universe::heap()->prepare_for_verify();
1330 Universe::verify(VerifyOption_G1UsePrevMarking,
1331 " VerifyDuringGC:(before)");
1332 }
1333 g1h->check_bitmaps("Remark Start");
1334
1335 G1CollectorPolicy* g1p = g1h->g1_policy();
1336 g1p->record_concurrent_mark_remark_start();
1337
1338 double start = os::elapsedTime();
1339
1340 checkpointRootsFinalWork();
1341
1342 double mark_work_end = os::elapsedTime();
1343
1344 weakRefsWork(clear_all_soft_refs);
1345
1346 if (has_overflown()) {
1347 // Oops. We overflowed. Restart concurrent marking.
1348 _restart_for_overflow = true;
1349 if (G1TraceMarkStackOverflow) {
1350 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1351 }
1352
1353 // Verify the heap w.r.t. the previous marking bitmap.
1354 if (VerifyDuringGC) {
1355 HandleMark hm; // handle scope
1356 Universe::heap()->prepare_for_verify();
1357 Universe::verify(VerifyOption_G1UsePrevMarking,
1358 " VerifyDuringGC:(overflow)");
1359 }
1360
1361 // Clear the marking state because we will be restarting
1362 // marking due to overflowing the global mark stack.
1363 reset_marking_state();
1364 } else {
1365 {
1366 G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
1367
1368 // Aggregate the per-task counting data that we have accumulated
1369 // while marking.
1370 aggregate_count_data();
1371 }
1372
1373 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1374 // We're done with marking.
1375 // This is the end of the marking cycle, we're expected all
1376 // threads to have SATB queues with active set to true.
1377 satb_mq_set.set_active_all_threads(false, /* new active value */
1378 true /* expected_active */);
1379
1380 if (VerifyDuringGC) {
1381 HandleMark hm; // handle scope
1382 Universe::heap()->prepare_for_verify();
1383 Universe::verify(VerifyOption_G1UseNextMarking,
1384 " VerifyDuringGC:(after)");
1385 }
1386 g1h->check_bitmaps("Remark End");
1387 assert(!restart_for_overflow(), "sanity");
1388 // Completely reset the marking state since marking completed
1389 set_non_marking_state();
1390 }
1391
1392 // Expand the marking stack, if we have to and if we can.
1393 if (_markStack.should_expand()) {
1394 _markStack.expand();
1395 }
1396
1397 // Statistics
1398 double now = os::elapsedTime();
1399 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1400 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1401 _remark_times.add((now - start) * 1000.0);
1402
1970 }
1971
1972 };
1973
1974 void ConcurrentMark::cleanup() {
1975 // world is stopped at this checkpoint
1976 assert(SafepointSynchronize::is_at_safepoint(),
1977 "world should be stopped");
1978 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1979
1980 // If a full collection has happened, we shouldn't do this.
1981 if (has_aborted()) {
1982 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1983 return;
1984 }
1985
1986 g1h->verify_region_sets_optional();
1987
1988 if (VerifyDuringGC) {
1989 HandleMark hm; // handle scope
1990 Universe::heap()->prepare_for_verify();
1991 Universe::verify(VerifyOption_G1UsePrevMarking,
1992 " VerifyDuringGC:(before)");
1993 }
1994 g1h->check_bitmaps("Cleanup Start");
1995
1996 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1997 g1p->record_concurrent_mark_cleanup_start();
1998
1999 double start = os::elapsedTime();
2000
2001 HeapRegionRemSet::reset_for_cleanup_tasks();
2002
2003 uint n_workers;
2004
2005 // Do counting once more with the world stopped for good measure.
2006 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2007
2008 g1h->set_par_threads();
2009 n_workers = g1h->n_par_threads();
2010 assert(g1h->n_par_threads() == n_workers,
2011 "Should not have been reset");
2012 g1h->workers()->run_task(&g1_par_count_task);
2013 // Done with the parallel phase so reset to 0.
2014 g1h->set_par_threads(0);
2015
2016 if (VerifyDuringGC) {
2081 }
2082
2083 // this will also free any regions totally full of garbage objects,
2084 // and sort the regions.
2085 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2086
2087 // Statistics.
2088 double end = os::elapsedTime();
2089 _cleanup_times.add((end - start) * 1000.0);
2090
2091 if (G1Log::fine()) {
2092 g1h->g1_policy()->print_heap_transition(start_used_bytes);
2093 }
2094
2095 // Clean up will have freed any regions completely full of garbage.
2096 // Update the soft reference policy with the new heap occupancy.
2097 Universe::update_heap_info_at_gc();
2098
2099 if (VerifyDuringGC) {
2100 HandleMark hm; // handle scope
2101 Universe::heap()->prepare_for_verify();
2102 Universe::verify(VerifyOption_G1UsePrevMarking,
2103 " VerifyDuringGC:(after)");
2104 }
2105
2106 g1h->check_bitmaps("Cleanup End");
2107
2108 g1h->verify_region_sets_optional();
2109
2110 // We need to make this be a "collection" so any collection pause that
2111 // races with it goes around and waits for completeCleanup to finish.
2112 g1h->increment_total_collections();
2113
2114 // Clean out dead classes and update Metaspace sizes.
2115 if (ClassUnloadingWithConcurrentMark) {
2116 ClassLoaderDataGraph::purge();
2117 }
2118 MetaspaceGC::compute_new_size();
2119
2120 // We reclaimed old regions so we should calculate the sizes to make
2121 // sure we update the old gen/space data.
|
1309 }
1310 };
1311
// Remark pause: the final marking checkpoint, run with the world stopped
// at a safepoint. Finishes the remaining marking work, processes weak
// references, and either completes the marking cycle or flags a restart
// when the global mark stack overflowed.
// clear_all_soft_refs is forwarded to weakRefsWork().
1312 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1313 // world is stopped at this checkpoint
1314 assert(SafepointSynchronize::is_at_safepoint(),
1315 "world should be stopped");
1316
1317 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1318
1319 // If a full collection has happened, we shouldn't do this.
1320 if (has_aborted()) {
1321 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1322 return;
1323 }
1324
1325 SvcGCMarker sgcm(SvcGCMarker::OTHER);
1326
// Optional pre-remark heap verification against the previous bitmap.
1327 if (VerifyDuringGC) {
1328 HandleMark hm; // handle scope
1329 g1h->prepare_for_verify();
1330 Universe::verify(VerifyOption_G1UsePrevMarking,
1331 " VerifyDuringGC:(before)");
1332 }
1333 g1h->check_bitmaps("Remark Start");
1334
1335 G1CollectorPolicy* g1p = g1h->g1_policy();
1336 g1p->record_concurrent_mark_remark_start();
1337
1338 double start = os::elapsedTime();
1339
1340 checkpointRootsFinalWork();
1341
1342 double mark_work_end = os::elapsedTime();
1343
1344 weakRefsWork(clear_all_soft_refs);
1345
1346 if (has_overflown()) {
1347 // Oops. We overflowed. Restart concurrent marking.
1348 _restart_for_overflow = true;
1349 if (G1TraceMarkStackOverflow) {
1350 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1351 }
1352
1353 // Verify the heap w.r.t. the previous marking bitmap.
1354 if (VerifyDuringGC) {
1355 HandleMark hm; // handle scope
1356 g1h->prepare_for_verify();
1357 Universe::verify(VerifyOption_G1UsePrevMarking,
1358 " VerifyDuringGC:(overflow)");
1359 }
1360
1361 // Clear the marking state because we will be restarting
1362 // marking due to overflowing the global mark stack.
1363 reset_marking_state();
1364 } else {
1365 {
1366 G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
1367
1368 // Aggregate the per-task counting data that we have accumulated
1369 // while marking.
1370 aggregate_count_data();
1371 }
1372
1373 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1374 // We're done with marking.
1375 // This is the end of the marking cycle; we expect all
1376 // threads to have SATB queues with active set to true.
1377 satb_mq_set.set_active_all_threads(false, /* new active value */
1378 true /* expected_active */);
1379
// On successful completion, verify against the just-built next bitmap.
1380 if (VerifyDuringGC) {
1381 HandleMark hm; // handle scope
1382 g1h->prepare_for_verify();
1383 Universe::verify(VerifyOption_G1UseNextMarking,
1384 " VerifyDuringGC:(after)");
1385 }
1386 g1h->check_bitmaps("Remark End");
1387 assert(!restart_for_overflow(), "sanity");
1388 // Completely reset the marking state since marking completed
1389 set_non_marking_state();
1390 }
1391
1392 // Expand the marking stack, if we have to and if we can.
1393 if (_markStack.should_expand()) {
1394 _markStack.expand();
1395 }
1396
// Statistics: record phase times in milliseconds.
1397 // Statistics
1398 double now = os::elapsedTime();
1399 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1400 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1401 _remark_times.add((now - start) * 1000.0);
1402
1970 }
1971
1972 };
1973
// Cleanup pause: executed with the world stopped after concurrent marking
// completes. This visible portion records the pause start and re-runs the
// final live-data counting in parallel; the remainder of the function lies
// outside this view.
1974 void ConcurrentMark::cleanup() {
1975 // world is stopped at this checkpoint
1976 assert(SafepointSynchronize::is_at_safepoint(),
1977 "world should be stopped");
1978 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1979
1980 // If a full collection has happened, we shouldn't do this.
1981 if (has_aborted()) {
1982 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1983 return;
1984 }
1985
1986 g1h->verify_region_sets_optional();
1987
// Optional pre-cleanup heap verification against the previous bitmap.
1988 if (VerifyDuringGC) {
1989 HandleMark hm; // handle scope
1990 g1h->prepare_for_verify();
1991 Universe::verify(VerifyOption_G1UsePrevMarking,
1992 " VerifyDuringGC:(before)");
1993 }
1994 g1h->check_bitmaps("Cleanup Start");
1995
1996 G1CollectorPolicy* g1p = g1h->g1_policy();
1997 g1p->record_concurrent_mark_cleanup_start();
1998
1999 double start = os::elapsedTime();
2000
2001 HeapRegionRemSet::reset_for_cleanup_tasks();
2002
2003 uint n_workers;
2004
2005 // Do counting once more with the world stopped for good measure.
2006 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2007
2008 g1h->set_par_threads();
// NOTE(review): this assert compares n_par_threads() against the value
// just read from it, so it looks vacuous as written — confirm whether it
// was meant to guard against a reset between here and run_task().
2009 n_workers = g1h->n_par_threads();
2010 assert(g1h->n_par_threads() == n_workers,
2011 "Should not have been reset");
2012 g1h->workers()->run_task(&g1_par_count_task);
2013 // Done with the parallel phase so reset to 0.
2014 g1h->set_par_threads(0);
2015
2016 if (VerifyDuringGC) {
2081 }
2082
2083 // this will also free any regions totally full of garbage objects,
2084 // and sort the regions.
2085 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2086
2087 // Statistics.
2088 double end = os::elapsedTime();
2089 _cleanup_times.add((end - start) * 1000.0);
2090
// start_used_bytes is captured earlier in this function, outside this view.
2091 if (G1Log::fine()) {
2092 g1h->g1_policy()->print_heap_transition(start_used_bytes);
2093 }
2094
2095 // Clean up will have freed any regions completely full of garbage.
2096 // Update the soft reference policy with the new heap occupancy.
2097 Universe::update_heap_info_at_gc();
2098
// Optional post-cleanup heap verification against the previous bitmap.
2099 if (VerifyDuringGC) {
2100 HandleMark hm; // handle scope
2101 g1h->prepare_for_verify();
2102 Universe::verify(VerifyOption_G1UsePrevMarking,
2103 " VerifyDuringGC:(after)");
2104 }
2105
2106 g1h->check_bitmaps("Cleanup End");
2107
2108 g1h->verify_region_sets_optional();
2109
2110 // We need to make this be a "collection" so any collection pause that
2111 // races with it goes around and waits for completeCleanup to finish.
2112 g1h->increment_total_collections();
2113
2114 // Clean out dead classes and update Metaspace sizes.
2115 if (ClassUnloadingWithConcurrentMark) {
2116 ClassLoaderDataGraph::purge();
2117 }
2118 MetaspaceGC::compute_new_size();
2119
2120 // We reclaimed old regions so we should calculate the sizes to make
2121 // sure we update the old gen/space data.
|