// Remark pause: runs at a safepoint to finish the concurrent marking phase.
// Completes the leftover marking work, processes weak references
// (clear_all_soft_refs is forwarded to weakRefsWork()), handles a possible
// marking-stack overflow by scheduling a marking restart, and records
// remark timing statistics.
// NOTE(review): this excerpt elides some source lines (the embedded line
// numbering jumps); comments below annotate only what is visible.
1045 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1046 // world is stopped at this checkpoint
1047 assert(SafepointSynchronize::is_at_safepoint(),
1048 "world should be stopped");
1049
1050 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1051
1052 // If a full collection has happened, we shouldn't do this.
1053 if (has_aborted()) {
1054 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1055 return;
1056 }
1057
// NOTE(review): RAII marker scoping this pause for the serviceability
// machinery -- presumably JVMTI/monitoring notification; confirm.
1058 SvcGCMarker sgcm(SvcGCMarker::OTHER);
1059
1060 if (VerifyDuringGC) {
1061 HandleMark hm; // handle scope
1062 g1h->prepare_for_verify();
1063 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1064 }
1065 g1h->check_bitmaps("Remark Start");
1066
1067 G1CollectorPolicy* g1p = g1h->g1_policy();
1068 g1p->record_concurrent_mark_remark_start();
1069
1070 double start = os::elapsedTime();
1071
// Finish the marking work left over from the concurrent phase.
1072 checkpointRootsFinalWork();
1073
// Timestamp separating "mark work" from "weak reference work" in the
// per-phase statistics recorded at the bottom of this method.
1074 double mark_work_end = os::elapsedTime();
1075
1076 weakRefsWork(clear_all_soft_refs);
1077
1078 if (has_overflown()) {
1079 // Oops. We overflowed. Restart concurrent marking.
1080 _restart_for_overflow = true;
1081 log_develop_trace(gc)("Remark led to restart for overflow.");
1082
1083 // Verify the heap w.r.t. the previous marking bitmap.
1084 if (VerifyDuringGC) {
1085 HandleMark hm; // handle scope
// NOTE(review): excerpt gap here -- the rest of this verification scope
// (and presumably a '} else {' starting the non-overflow path) is not
// shown. The sanity assert below implies the following code runs only
// when marking did NOT overflow; confirm against the full source.
1094 {
1095 GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm());
1096
1097 // Aggregate the per-task counting data that we have accumulated
1098 // while marking.
1099 aggregate_count_data();
1100 }
1101
1102 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1103 // We're done with marking.
1104 // This is the end of the marking cycle, we expect all
1105 // threads to have SATB queues with active set to true.
1106 satb_mq_set.set_active_all_threads(false, /* new active value */
1107 true /* expected_active */);
1108
1109 if (VerifyDuringGC) {
1110 HandleMark hm; // handle scope
1111 g1h->prepare_for_verify();
1112 Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1113 }
1114 g1h->check_bitmaps("Remark End");
1115 assert(!restart_for_overflow(), "sanity");
1116 // Completely reset the marking state since marking completed
1117 set_non_marking_state();
1118 }
1119
1120 // Expand the marking stack, if we have to and if we can.
1121 if (_markStack.should_expand()) {
1122 _markStack.expand();
1123 }
1124
1125 // Statistics (recorded in milliseconds).
1126 double now = os::elapsedTime();
1127 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1128 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1129 _remark_times.add((now - start) * 1000.0);
1130
1131 g1p->record_concurrent_mark_remark_end();
1132
// Report object counts to the GC tracing framework, using the concurrent
// marking liveness closure to decide which objects count as alive.
1133 G1CMIsAliveClosure is_alive(g1h);
1134 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1588 _cleanup_list->add_ordered(&local_cleanup_list);
1589 assert(local_cleanup_list.is_empty(), "post-condition");
1590
1591 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1592 }
1593 }
1594 };
1595
// Cleanup pause: runs at a safepoint after marking has completed.
// Re-counts region/card liveness in parallel with the world stopped,
// records cleanup timing, updates heap and Metaspace sizing information,
// and bumps the collection count so racing pauses wait for
// completeCleanup().
// NOTE(review): this excerpt elides some source lines (the embedded line
// numbering jumps); comments below annotate only what is visible.
1596 void ConcurrentMark::cleanup() {
1597 // world is stopped at this checkpoint
1598 assert(SafepointSynchronize::is_at_safepoint(),
1599 "world should be stopped");
1600 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1601
1602 // If a full collection has happened, we shouldn't do this.
1603 if (has_aborted()) {
1604 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1605 return;
1606 }
1607
1608 g1h->verify_region_sets_optional();
1609
1610 if (VerifyDuringGC) {
1611 HandleMark hm; // handle scope
1612 g1h->prepare_for_verify();
1613 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1614 }
1615 g1h->check_bitmaps("Cleanup Start");
1616
1617 G1CollectorPolicy* g1p = g1h->g1_policy();
1618 g1p->record_concurrent_mark_cleanup_start();
1619
1620 double start = os::elapsedTime();
1621
1622 HeapRegionRemSet::reset_for_cleanup_tasks();
1623
1624 // Do counting once more with the world stopped for good measure.
1625 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1626
// Run the final counting task on the heap's worker threads.
1627 g1h->workers()->run_task(&g1_par_count_task);
1628
1629 if (VerifyDuringGC) {
1630 // Verify that the counting data accumulated during marking matches
1631 // that calculated by walking the marking bitmap.
1632
1633 // Bitmaps to hold expected values
1634 BitMap expected_region_bm(_region_bm.size(), true);
1635 BitMap expected_card_bm(_card_bm.size(), true);
// NOTE(review): excerpt gap here -- the verification work that fills and
// compares the expected bitmaps is not shown in this listing.
1685 }
1686
1687 // this will also free any regions totally full of garbage objects,
1688 // and sort the regions.
1689 g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1690
1691 // Statistics (recorded in milliseconds).
1692 double end = os::elapsedTime();
1693 _cleanup_times.add((end - start) * 1000.0);
1694
1695 // Clean up will have freed any regions completely full of garbage.
1696 // Update the soft reference policy with the new heap occupancy.
1697 Universe::update_heap_info_at_gc();
1698
1699 if (VerifyDuringGC) {
1700 HandleMark hm; // handle scope
1701 g1h->prepare_for_verify();
1702 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
1703 }
1704
1705 g1h->check_bitmaps("Cleanup End");
1706
1707 g1h->verify_region_sets_optional();
1708
1709 // We need to make this be a "collection" so any collection pause that
1710 // races with it goes around and waits for completeCleanup to finish.
1711 g1h->increment_total_collections();
1712
1713 // Clean out dead classes and update Metaspace sizes.
1714 if (ClassUnloadingWithConcurrentMark) {
1715 ClassLoaderDataGraph::purge();
1716 }
1717 MetaspaceGC::compute_new_size();
1718
1719 // We reclaimed old regions so we should calculate the sizes to make
1720 // sure we update the old gen/space data.
1721 g1h->g1mm()->update_sizes();
1722 g1h->allocation_context_stats().update_after_mark();
1723
1724 g1h->trace_heap_after_concurrent_cycle();
1725 }
1726
1727 void ConcurrentMark::completeCleanup() {
|
// Remark pause: runs at a safepoint to finish the concurrent marking phase.
// Completes the leftover marking work, processes weak references
// (clear_all_soft_refs is forwarded to weakRefsWork()), handles a possible
// marking-stack overflow by scheduling a marking restart, and records
// remark timing statistics.
// NOTE(review): in this copy of the listing, bitmap verification is
// delegated through g1h->verifier() rather than called on the heap itself.
// The excerpt elides some source lines (the embedded line numbering jumps);
// comments below annotate only what is visible.
1045 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1046 // world is stopped at this checkpoint
1047 assert(SafepointSynchronize::is_at_safepoint(),
1048 "world should be stopped");
1049
1050 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1051
1052 // If a full collection has happened, we shouldn't do this.
1053 if (has_aborted()) {
1054 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1055 return;
1056 }
1057
// NOTE(review): RAII marker scoping this pause for the serviceability
// machinery -- presumably JVMTI/monitoring notification; confirm.
1058 SvcGCMarker sgcm(SvcGCMarker::OTHER);
1059
1060 if (VerifyDuringGC) {
1061 HandleMark hm; // handle scope
1062 g1h->prepare_for_verify();
1063 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1064 }
1065 g1h->verifier()->check_bitmaps("Remark Start");
1066
1067 G1CollectorPolicy* g1p = g1h->g1_policy();
1068 g1p->record_concurrent_mark_remark_start();
1069
1070 double start = os::elapsedTime();
1071
// Finish the marking work left over from the concurrent phase.
1072 checkpointRootsFinalWork();
1073
// Timestamp separating "mark work" from "weak reference work" in the
// per-phase statistics recorded at the bottom of this method.
1074 double mark_work_end = os::elapsedTime();
1075
1076 weakRefsWork(clear_all_soft_refs);
1077
1078 if (has_overflown()) {
1079 // Oops. We overflowed. Restart concurrent marking.
1080 _restart_for_overflow = true;
1081 log_develop_trace(gc)("Remark led to restart for overflow.");
1082
1083 // Verify the heap w.r.t. the previous marking bitmap.
1084 if (VerifyDuringGC) {
1085 HandleMark hm; // handle scope
// NOTE(review): excerpt gap here -- the rest of this verification scope
// (and presumably a '} else {' starting the non-overflow path) is not
// shown. The sanity assert below implies the following code runs only
// when marking did NOT overflow; confirm against the full source.
1094 {
1095 GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm());
1096
1097 // Aggregate the per-task counting data that we have accumulated
1098 // while marking.
1099 aggregate_count_data();
1100 }
1101
1102 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1103 // We're done with marking.
1104 // This is the end of the marking cycle, we expect all
1105 // threads to have SATB queues with active set to true.
1106 satb_mq_set.set_active_all_threads(false, /* new active value */
1107 true /* expected_active */);
1108
1109 if (VerifyDuringGC) {
1110 HandleMark hm; // handle scope
1111 g1h->prepare_for_verify();
1112 Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1113 }
1114 g1h->verifier()->check_bitmaps("Remark End");
1115 assert(!restart_for_overflow(), "sanity");
1116 // Completely reset the marking state since marking completed
1117 set_non_marking_state();
1118 }
1119
1120 // Expand the marking stack, if we have to and if we can.
1121 if (_markStack.should_expand()) {
1122 _markStack.expand();
1123 }
1124
1125 // Statistics (recorded in milliseconds).
1126 double now = os::elapsedTime();
1127 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1128 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1129 _remark_times.add((now - start) * 1000.0);
1130
1131 g1p->record_concurrent_mark_remark_end();
1132
// Report object counts to the GC tracing framework, using the concurrent
// marking liveness closure to decide which objects count as alive.
1133 G1CMIsAliveClosure is_alive(g1h);
1134 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1588 _cleanup_list->add_ordered(&local_cleanup_list);
1589 assert(local_cleanup_list.is_empty(), "post-condition");
1590
1591 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1592 }
1593 }
1594 };
1595
// Cleanup pause: runs at a safepoint after marking has completed.
// Re-counts region/card liveness in parallel with the world stopped,
// records cleanup timing, updates heap and Metaspace sizing information,
// and bumps the collection count so racing pauses wait for
// completeCleanup().
// NOTE(review): in this copy of the listing, bitmap and region-set
// verification is delegated through g1h->verifier() rather than called on
// the heap itself. The excerpt elides some source lines (the embedded
// line numbering jumps); comments below annotate only what is visible.
1596 void ConcurrentMark::cleanup() {
1597 // world is stopped at this checkpoint
1598 assert(SafepointSynchronize::is_at_safepoint(),
1599 "world should be stopped");
1600 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1601
1602 // If a full collection has happened, we shouldn't do this.
1603 if (has_aborted()) {
1604 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1605 return;
1606 }
1607
1608 g1h->verifier()->verify_region_sets_optional();
1609
1610 if (VerifyDuringGC) {
1611 HandleMark hm; // handle scope
1612 g1h->prepare_for_verify();
1613 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1614 }
1615 g1h->verifier()->check_bitmaps("Cleanup Start");
1616
1617 G1CollectorPolicy* g1p = g1h->g1_policy();
1618 g1p->record_concurrent_mark_cleanup_start();
1619
1620 double start = os::elapsedTime();
1621
1622 HeapRegionRemSet::reset_for_cleanup_tasks();
1623
1624 // Do counting once more with the world stopped for good measure.
1625 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1626
// Run the final counting task on the heap's worker threads.
1627 g1h->workers()->run_task(&g1_par_count_task);
1628
1629 if (VerifyDuringGC) {
1630 // Verify that the counting data accumulated during marking matches
1631 // that calculated by walking the marking bitmap.
1632
1633 // Bitmaps to hold expected values
1634 BitMap expected_region_bm(_region_bm.size(), true);
1635 BitMap expected_card_bm(_card_bm.size(), true);
// NOTE(review): excerpt gap here -- the verification work that fills and
// compares the expected bitmaps is not shown in this listing.
1685 }
1686
1687 // this will also free any regions totally full of garbage objects,
1688 // and sort the regions.
1689 g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1690
1691 // Statistics (recorded in milliseconds).
1692 double end = os::elapsedTime();
1693 _cleanup_times.add((end - start) * 1000.0);
1694
1695 // Clean up will have freed any regions completely full of garbage.
1696 // Update the soft reference policy with the new heap occupancy.
1697 Universe::update_heap_info_at_gc();
1698
1699 if (VerifyDuringGC) {
1700 HandleMark hm; // handle scope
1701 g1h->prepare_for_verify();
1702 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
1703 }
1704
1705 g1h->verifier()->check_bitmaps("Cleanup End");
1706
1707 g1h->verifier()->verify_region_sets_optional();
1708
1709 // We need to make this be a "collection" so any collection pause that
1710 // races with it goes around and waits for completeCleanup to finish.
1711 g1h->increment_total_collections();
1712
1713 // Clean out dead classes and update Metaspace sizes.
1714 if (ClassUnloadingWithConcurrentMark) {
1715 ClassLoaderDataGraph::purge();
1716 }
1717 MetaspaceGC::compute_new_size();
1718
1719 // We reclaimed old regions so we should calculate the sizes to make
1720 // sure we update the old gen/space data.
1721 g1h->g1mm()->update_sizes();
1722 g1h->allocation_context_stats().update_after_mark();
1723
1724 g1h->trace_heap_after_concurrent_cycle();
1725 }
1726
1727 void ConcurrentMark::completeCleanup() {
|