991
992 verifier->verify_region_sets_optional();
993
994 if (VerifyDuringGC) {
995 GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
996
997 size_t const BufLen = 512;
998 char buffer[BufLen];
999
1000 jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1001 verifier->verify(type, vo, buffer);
1002 }
1003
1004 verifier->check_bitmaps(caller);
1005 }
1006
1007 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1008 G1CollectedHeap* _g1h;
1009 G1ConcurrentMark* _cm;
1010
1011 uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
1012
1013 void update_remset_before_rebuild(HeapRegion * hr) {
1014 G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1015
1016 size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1017 bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1018 if (selected_for_rebuild) {
1019 _num_regions_selected_for_rebuild++;
1020 }
1021 _cm->update_top_at_rebuild_start(hr);
1022 }
1023
1024 public:
1025 G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
1026 _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0) { }
1027
1028 virtual bool do_heap_region(HeapRegion* r) {
1029 update_remset_before_rebuild(r);
1030 return false;
1031 }
1032
1033 uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1034 };
1035
1036 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1037 G1CollectedHeap* _g1h;
1038 public:
1039 G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1040
1041 virtual bool do_heap_region(HeapRegion* r) {
1042 _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1043 return false;
1044 }
1045 };
1046
// The Remark pause (runs at a safepoint on the VM thread). Finishes marking if
// the global mark stack did not overflow, processes weak references, flushes
// per-task caches and selects regions for remembered set rebuild; on overflow
// it instead arranges for concurrent marking to restart.
void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // NOTE(review): lines 1050-1065 of the original file are not in this excerpt;
  // this brace closes a scope opened there (and `start`/`g1p` used below are
  // presumably declared in that missing part -- confirm against full source).
  }

  double mark_work_end = os::elapsedTime();

  // Marking only finished if the global mark stack never overflowed.
  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we're expected all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      // NOTE(review): the GCTraceTime temporary is unnamed, so it is destroyed
      // at the end of this statement -- verify the timer actually spans the
      // scope; other call sites use a named variable ("debug(...)").
      GCTraceTime(Debug, gc, phases)("Flush Task Caches");
      flush_all_task_caches();
    }

    {
      GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
      G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
      _g1h->heap_region_iterate(&cl);
      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.num_selected_for_rebuild());
    }

    // Marking completed against the next bitmap, so verify with next-marking info.
    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Marking state is unreliable after overflow; verify with prev-marking info.
    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases)("Report Object Count");
    report_object_count();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}
1123
1124 class G1CleanupTask : public AbstractGangTask {
1125 // Per-region work during the Cleanup pause.
  class G1CleanupRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;                 // Total used bytes of the regions reclaimed by this closure.
    FreeRegionList* _local_cleanup_list; // Worker-local list freed regions are appended to.
    uint _old_regions_removed;
    uint _humongous_regions_removed;
    HRRSCleanupTask* _hrrs_cleanup_task; // Accumulates remembered-set cleanup work for kept regions.

  public:
    G1CleanupRegionsClosure(G1CollectedHeap* g1,
                            FreeRegionList* local_cleanup_list,
                            HRRSCleanupTask* hrrs_cleanup_task) :
      _g1h(g1),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0),
      _hrrs_cleanup_task(hrrs_cleanup_task) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      hr->note_end_of_marking();

      // Reclaim the region only if it is in use, marking proved it has no live
      // data, and it is neither young (young regions are handled by evacuation)
      // nor archive (pinned, must never be freed).
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        // Detach from its current set before freeing; done while iterating,
        // hence the locked/free calls below instead of taking the heap lock.
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        // The region is empty: drop stale card marks and marking statistics.
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      } else {
        // Region stays alive: queue its remembered set for cleanup instead.
        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
      }

      return false;
    }
  };
1226 }
1227 }
1228 // And actually make them available.
1229 _g1h->prepend_to_freelist(&empty_regions_list);
1230 }
1231 }
1232
// The Cleanup pause (runs at a safepoint on the VM thread). Finalizes
// remembered set tracking after the concurrent rebuild, swaps the marking
// bitmaps, reclaims completely empty regions and updates heap/metaspace
// sizing. (The remainder of this function lies beyond this excerpt.)
void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  // Bitmaps have not been swapped yet, so liveness is still on "next".
  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");

  {
    // NOTE(review): unnamed GCTraceTime temporary -- confirm the macro keeps
    // the timer alive for the whole block, not just this statement.
    GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitmap as "prev".
  swap_mark_bitmaps();
  {
    GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
    reclaim_empty_regions();
  }

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    GCTraceTime(Debug, gc, phases)("Purge Metaspace");
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
1280
1608 if (has_overflown()) {
1609 // We can not trust g1_is_alive if the marking stack overflowed
1610 return;
1611 }
1612
1613 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1614
1615 // Unload Klasses, String, Symbols, Code Cache, etc.
1616 if (ClassUnloadingWithConcurrentMark) {
1617 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1618 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1619 _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1620 } else {
1621 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1622 // No need to clean string table and symbol table as they are treated as strong roots when
1623 // class unloading is disabled.
1624 _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1625 }
1626 }
1627
1628 void G1ConcurrentMark::report_object_count() {
1629 G1CMIsAliveClosure is_alive(_g1h);
1630 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1631 }
1632
1633 void G1ConcurrentMark::swap_mark_bitmaps() {
1634 G1CMBitMap* temp = _prev_mark_bitmap;
1635 _prev_mark_bitmap = _next_mark_bitmap;
1636 _next_mark_bitmap = temp;
1637 _g1h->collector_state()->set_clearing_next_bitmap(true);
1638 }
1639
1640 // Closure for marking entries in SATB buffers.
1641 class G1CMSATBBufferClosure : public SATBBufferClosure {
1642 private:
1643 G1CMTask* _task;
1644 G1CollectedHeap* _g1h;
1645
1646 // This is very similar to G1CMTask::deal_with_reference, but with
1647 // more relaxed requirements for the argument, so this must be more
1648 // circumspect about treating the argument as an object.
1649 void do_entry(void* entry) const {
1650 _task->increment_refs_reached();
1651 oop const obj = static_cast<oop>(entry);
|
991
992 verifier->verify_region_sets_optional();
993
994 if (VerifyDuringGC) {
995 GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
996
997 size_t const BufLen = 512;
998 char buffer[BufLen];
999
1000 jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1001 verifier->verify(type, vo, buffer);
1002 }
1003
1004 verifier->check_bitmaps(caller);
1005 }
1006
// Heap region closure run during the Remark pause. For every region it:
// selects/deselects the region for remembered set rebuild, folds the marking
// liveness into the region's marked-bytes accounting (spreading humongous
// liveness across all regions of the object), optionally prints liveness
// info, and finishes the region's marking bookkeeping.
class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

  // Liveness printer, only invoked when (gc, liveness) trace logging is on.
  G1PrintRegionLivenessInfoClosure _cl;

  uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.

  void update_remset_before_rebuild(HeapRegion * hr) {
    G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

    // Marking liveness is in words; the policy expects bytes.
    size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
    bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
    if (selected_for_rebuild) {
      _num_regions_selected_for_rebuild++;
    }
    _cm->update_top_at_rebuild_start(hr);
  }

  // Spreads marked_words, all attributed to the humongous "starts" region by
  // marking, across every region the humongous object covers: each region gets
  // up to GrainWords, the last (partial) region the remainder.
  void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
    uint const region_idx = hr->hrm_index();
    assert(hr->is_starts_humongous(),
           "Should not have marked bytes " SIZE_FORMAT " in non-starts humongous region %u (%s)",
           marked_words, region_idx, hr->get_type_str());
    uint num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(marked_words);

    for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
      HeapRegion* const r = _g1h->region_at(i);
      size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
      assert(words_to_add > 0, "Out of space to distribute before end of humongous object in region %u (starts %u)", i, region_idx);

      r->add_to_marked_bytes(words_to_add * HeapWordSize);
      marked_words -= words_to_add;
    }
    // Everything must have been handed out exactly.
    assert(marked_words == 0,
           SIZE_FORMAT " words left after distributing space across %u regions",
           marked_words, num_regions_in_humongous);
  }

  void update_marked_bytes(HeapRegion* hr) {
    uint const region_idx = hr->hrm_index();
    size_t marked_words = _cm->liveness(region_idx);
    // The marking attributes the object's size completely to the humongous starts
    // region. We need to distribute this value across the entire set of regions a
    // humongous object spans.
    if (hr->is_humongous()) {
      if (marked_words > 0) {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous start region %u (%s), word size %d (%f)",
                               marked_words, region_idx, hr->get_type_str(),
                               oop(hr->bottom())->size(), (double)oop(hr->bottom())->size() / HeapRegion::GrainWords);
        distribute_marked_bytes(hr, marked_words);
      } else {
        // NOTE(review): this assert is vacuous (marked_words == 0 holds by the
        // branch condition); its message suggests it was meant to guard
        // continues-humongous regions -- confirm intent.
        assert(marked_words == 0,
               "Asked to add " SIZE_FORMAT " words to add to continues humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
      }
    } else {
      log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
      hr->add_to_marked_bytes(marked_words * HeapWordSize);
    }
  }

public:
  G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
    _g1h(g1h), _cm(cm), _cl("Post-Marking"), _num_regions_selected_for_rebuild(0) { }

  virtual bool do_heap_region(HeapRegion* r) {
    update_remset_before_rebuild(r);
    update_marked_bytes(r);
    if (log_is_enabled(Trace, gc, liveness)) {
      _cl.do_heap_region(r);
    }
    r->note_end_of_marking();
    return false;
  }

  uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
};
1085
1086 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1087 G1CollectedHeap* _g1h;
1088 public:
1089 G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1090
1091 virtual bool do_heap_region(HeapRegion* r) {
1092 _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1093 return false;
1094 }
1095 };
1096
// The Remark pause (runs at a safepoint on the VM thread). Finishes marking if
// the global mark stack did not overflow, processes weak references, swaps the
// marking bitmaps and selects regions for remembered set rebuild; on overflow
// it instead arranges for concurrent marking to restart.
void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // NOTE(review): lines 1100-1115 of the original file are not in this
  // excerpt; this brace closes a scope opened there (and `start`/`g1p` used
  // below are presumably declared in that missing part -- confirm).
  }

  double mark_work_end = os::elapsedTime();

  // Marking only finished if the global mark stack never overflowed.
  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we're expected all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      // NOTE(review): unnamed GCTraceTime temporary -- confirm the timer
      // spans the whole block rather than a single statement.
      GCTraceTime(Debug, gc, phases)("Flush Task Caches");
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
      G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
      _g1h->heap_region_iterate(&cl);
      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.num_selected_for_rebuild());
    }

    // Bitmaps were just swapped, so verify against prev-marking information.
    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases)("Report Object Count");
    // Pass whether marking completed: the liveness oracle differs because the
    // bitmaps were swapped above only in the mark_finished case.
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}
1175
1176 class G1CleanupTask : public AbstractGangTask {
1177 // Per-region work during the Cleanup pause.
  class G1CleanupRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;                 // Total used bytes of the regions reclaimed by this closure.
    FreeRegionList* _local_cleanup_list; // Worker-local list freed regions are appended to.
    uint _old_regions_removed;
    uint _humongous_regions_removed;
    HRRSCleanupTask* _hrrs_cleanup_task; // Accumulates remembered-set cleanup work for kept regions.

  public:
    G1CleanupRegionsClosure(G1CollectedHeap* g1,
                            FreeRegionList* local_cleanup_list,
                            HRRSCleanupTask* hrrs_cleanup_task) :
      _g1h(g1),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0),
      _hrrs_cleanup_task(hrrs_cleanup_task) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      // Reclaim the region only if it is in use, marking proved it has no live
      // data, and it is neither young (young regions are handled by evacuation)
      // nor archive (pinned, must never be freed).
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        // Detach from its current set before freeing; done while iterating,
        // hence the locked/free calls below instead of taking the heap lock.
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        // The region is empty: drop stale card marks and marking statistics.
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      } else {
        // Region stays alive: queue its remembered set for cleanup instead.
        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
      }

      return false;
    }
  };
1276 }
1277 }
1278 // And actually make them available.
1279 _g1h->prepend_to_freelist(&empty_regions_list);
1280 }
1281 }
1282
// The Cleanup pause (runs at a safepoint on the VM thread). Finalizes
// remembered set tracking after the concurrent rebuild, reclaims completely
// empty regions and updates heap/metaspace sizing. Bitmaps were already
// swapped in remark(), so all verification here uses prev-marking info.
// (The remainder of this function lies beyond this excerpt.)
void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    // NOTE(review): unnamed GCTraceTime temporary -- confirm the macro keeps
    // the timer alive for the whole block, not just this statement.
    GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  {
    GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
    reclaim_empty_regions();
  }

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    GCTraceTime(Debug, gc, phases)("Purge Metaspace");
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
1328
1656 if (has_overflown()) {
1657 // We can not trust g1_is_alive if the marking stack overflowed
1658 return;
1659 }
1660
1661 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1662
1663 // Unload Klasses, String, Symbols, Code Cache, etc.
1664 if (ClassUnloadingWithConcurrentMark) {
1665 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1666 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1667 _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1668 } else {
1669 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1670 // No need to clean string table and symbol table as they are treated as strong roots when
1671 // class unloading is disabled.
1672 _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1673 }
1674 }
1675
1676 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1677 // the prev bitmap determining liveness.
1678 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1679 G1CollectedHeap* _g1;
1680 public:
1681 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
1682
1683 bool do_object_b(oop obj) {
1684 HeapWord* addr = (HeapWord*)obj;
1685 return addr != NULL &&
1686 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_dead(obj));
1687 }
1688 };
1689
1690 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1691 // Depending on the completion of the marking liveness needs to be determined
1692 // using either the next or prev bitmap.
1693 if (mark_completed) {
1694 G1ObjectCountIsAliveClosure is_alive(_g1h);
1695 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1696 } else {
1697 G1CMIsAliveClosure is_alive(_g1h);
1698 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1699 }
1700 }
1701
1702
1703 void G1ConcurrentMark::swap_mark_bitmaps() {
1704 G1CMBitMap* temp = _prev_mark_bitmap;
1705 _prev_mark_bitmap = _next_mark_bitmap;
1706 _next_mark_bitmap = temp;
1707 _g1h->collector_state()->set_clearing_next_bitmap(true);
1708 }
1709
1710 // Closure for marking entries in SATB buffers.
1711 class G1CMSATBBufferClosure : public SATBBufferClosure {
1712 private:
1713 G1CMTask* _task;
1714 G1CollectedHeap* _g1h;
1715
1716 // This is very similar to G1CMTask::deal_with_reference, but with
1717 // more relaxed requirements for the argument, so this must be more
1718 // circumspect about treating the argument as an object.
1719 void do_entry(void* entry) const {
1720 _task->increment_refs_reached();
1721 oop const obj = static_cast<oop>(entry);
|