src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

rev 49511 : imported patch 8200234-g1concurrentmark-refactorings
rev 49512 : imported patch 8200234-stefanj-review
rev 49515 : 8200255: Remove G1CMTask::_concurrent
Reviewed-by: sangheki, sjohanss
rev 49516 : 8200074: Remove G1ConcurrentMark::_concurrent_marking_in_progress
Reviewed-by: sjohanss, sangheki
rev 49517 : imported patch 8200305-gc,liveness-output
rev 49518 : imported patch 8200385-prev-bitmap-marks-left
rev 49519 : imported patch 8200385-stefanj-review
rev 49520 : imported patch 8178105-switch-at-remark
rev 49521 : [mq]: 8178105-stefanj-review
rev 49522 : [mq]: 8178105-stefanj-review2

--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

 991 
 992   verifier->verify_region_sets_optional();
 993 
 994   if (VerifyDuringGC) {
 995     GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
 996 
 997     size_t const BufLen = 512;
 998     char buffer[BufLen];
 999 
1000     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1001     verifier->verify(type, vo, buffer);
1002   }
1003 
1004   verifier->check_bitmaps(caller);
1005 }
1006 
1007 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1008   G1CollectedHeap* _g1h;
1009   G1ConcurrentMark* _cm;
1010 


1011   uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1012 
1013   void update_remset_before_rebuild(HeapRegion * hr) {
1014     G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1015 
1016     size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1017     bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1018     if (selected_for_rebuild) {
1019       _num_regions_selected_for_rebuild++;
1020     }
1021     _cm->update_top_at_rebuild_start(hr);
1022   }
1023 
1024 public:
1025   G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
1026     _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0) { }
1027 
1028   virtual bool do_heap_region(HeapRegion* r) {
1029     update_remset_before_rebuild(r);
1030     return false;
1031   }
1032 
1033   uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1034 };
1035 
1036 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1037   G1CollectedHeap* _g1h;
1038 public:
1039   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1040 
1041   virtual bool do_heap_region(HeapRegion* r) {
1042     _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1043     return false;
1044   }
1045 };
1046 
1047 void G1ConcurrentMark::remark() {
1048   assert_at_safepoint_on_vm_thread();
1049 


1066   }
1067 
1068   double mark_work_end = os::elapsedTime();
1069 
1070   bool const mark_finished = !has_overflown();
1071   if (mark_finished) {
1072     weak_refs_work(false /* clear_all_soft_refs */);
1073 
1074     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1075     // We're done with marking.
1076     // This is the end of the marking cycle; we expect all
1077     // threads to have SATB queues with active set to true.
1078     satb_mq_set.set_active_all_threads(false, /* new active value */
1079                                        true /* expected_active */);
1080 
1081     {
1082       GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1083       flush_all_task_caches();
1084     }
1085 


1086     {
1087       GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1088       G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1089       _g1h->heap_region_iterate(&cl);
1090       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1091                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
1092     }
1093 
1094     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");
1095 
1096     assert(!restart_for_overflow(), "sanity");
1097     // Completely reset the marking state since marking completed
1098     reset_at_marking_complete();
1099   } else {
1100     // We overflowed.  Restart concurrent marking.
1101     _restart_for_overflow = true;
1102 
1103     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1104 
1105     // Clear the marking state because we will be restarting
1106     // marking due to overflowing the global mark stack.
1107     reset_marking_for_restart();
1108   }
1109 
1110   {
1111     GCTraceTime(Debug, gc, phases)("Report Object Count");
1112     report_object_count();
1113   }
1114 
1115   // Statistics
1116   double now = os::elapsedTime();
1117   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1118   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1119   _remark_times.add((now - start) * 1000.0);
1120 
1121   g1p->record_concurrent_mark_remark_end();
1122 }
1123 
1124 class G1CleanupTask : public AbstractGangTask {
1125   // Per-region work during the Cleanup pause.
1126   class G1CleanupRegionsClosure : public HeapRegionClosure {
1127     G1CollectedHeap* _g1h;
1128     size_t _freed_bytes;
1129     FreeRegionList* _local_cleanup_list;
1130     uint _old_regions_removed;
1131     uint _humongous_regions_removed;
1132     HRRSCleanupTask* _hrrs_cleanup_task;
1133 
1134   public:
1135     G1CleanupRegionsClosure(G1CollectedHeap* g1,
1136                             FreeRegionList* local_cleanup_list,
1137                             HRRSCleanupTask* hrrs_cleanup_task) :
1138       _g1h(g1),
1139       _freed_bytes(0),
1140       _local_cleanup_list(local_cleanup_list),
1141       _old_regions_removed(0),
1142       _humongous_regions_removed(0),
1143       _hrrs_cleanup_task(hrrs_cleanup_task) { }
1144 
1145     size_t freed_bytes() { return _freed_bytes; }
1146     const uint old_regions_removed() { return _old_regions_removed; }
1147     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1148 
1149     bool do_heap_region(HeapRegion *hr) {
1150       hr->note_end_of_marking();
1151 
1152       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1153         _freed_bytes += hr->used();
1154         hr->set_containing_set(NULL);
1155         if (hr->is_humongous()) {
1156           _humongous_regions_removed++;
1157           _g1h->free_humongous_region(hr, _local_cleanup_list);
1158         } else {
1159           _old_regions_removed++;
1160           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1161         }
1162         hr->clear_cardtable();
1163         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1164         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1165       } else {
1166         hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1167       }
1168 
1169       return false;
1170     }
1171   };


1226       }
1227     }
1228     // And actually make them available.
1229     _g1h->prepend_to_freelist(&empty_regions_list);
1230   }
1231 }
1232 
1233 void G1ConcurrentMark::cleanup() {
1234   assert_at_safepoint_on_vm_thread();
1235 
1236   // If a full collection has happened, we shouldn't do this.
1237   if (has_aborted()) {
1238     return;
1239   }
1240 
1241   G1Policy* g1p = _g1h->g1_policy();
1242   g1p->record_concurrent_mark_cleanup_start();
1243 
1244   double start = os::elapsedTime();
1245 
1246   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");
1247 
1248   {
1249     GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
1250     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1251     _g1h->heap_region_iterate(&cl);
1252   }
1253 
1254   if (log_is_enabled(Trace, gc, liveness)) {
1255     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1256     _g1h->heap_region_iterate(&cl);
1257   }
1258 
1259   // Install newly created mark bitmap as "prev".
1260   swap_mark_bitmaps();
1261   {
1262     GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
1263     reclaim_empty_regions();
1264   }
1265 
1266   // Cleanup will have freed any regions completely full of garbage.
1267   // Update the soft reference policy with the new heap occupancy.
1268   Universe::update_heap_info_at_gc();
1269 
1270   // Clean out dead classes and update Metaspace sizes.
1271   if (ClassUnloadingWithConcurrentMark) {
1272     GCTraceTime(Debug, gc, phases)("Purge Metaspace");
1273     ClassLoaderDataGraph::purge();
1274   }
1275   MetaspaceGC::compute_new_size();
1276 
1277   // We reclaimed old regions, so we should recalculate the sizes to make
1278   // sure the old gen/space data gets updated.
1279   _g1h->g1mm()->update_sizes();
1280 


1608   if (has_overflown()) {
1609     // We cannot trust g1_is_alive if the marking stack overflowed
1610     return;
1611   }
1612 
1613   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1614 
1615   // Unload Klasses, String, Symbols, Code Cache, etc.
1616   if (ClassUnloadingWithConcurrentMark) {
1617     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1618     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1619     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1620   } else {
1621     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1622     // No need to clean string table and symbol table as they are treated as strong roots when
1623     // class unloading is disabled.
1624     _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1625   }
1626 }
1627 
1628 void G1ConcurrentMark::report_object_count() {
1629   G1CMIsAliveClosure is_alive(_g1h);
1630   _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1631 }
1632 
1633 void G1ConcurrentMark::swap_mark_bitmaps() {
1634   G1CMBitMap* temp = _prev_mark_bitmap;
1635   _prev_mark_bitmap = _next_mark_bitmap;
1636   _next_mark_bitmap = temp;
1637   _g1h->collector_state()->set_clearing_next_bitmap(true);
1638 }
1639 
1640 // Closure for marking entries in SATB buffers.
1641 class G1CMSATBBufferClosure : public SATBBufferClosure {
1642 private:
1643   G1CMTask* _task;
1644   G1CollectedHeap* _g1h;
1645 
1646   // This is very similar to G1CMTask::deal_with_reference, but with
1647   // more relaxed requirements for the argument, so this must be more
1648   // circumspect about treating the argument as an object.
1649   void do_entry(void* entry) const {
1650     _task->increment_refs_reached();
1651     oop const obj = static_cast<oop>(entry);

+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

 991 
 992   verifier->verify_region_sets_optional();
 993 
 994   if (VerifyDuringGC) {
 995     GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
 996 
 997     size_t const BufLen = 512;
 998     char buffer[BufLen];
 999 
1000     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1001     verifier->verify(type, vo, buffer);
1002   }
1003 
1004   verifier->check_bitmaps(caller);
1005 }
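
A minimal standalone sketch of the labeling pattern above, using standard
snprintf in place of HotSpot's jio_snprintf (both truncate rather than
overflow the buffer); make_verify_label is a hypothetical helper:

#include <cstddef>
#include <cstdio>

// Build the "During GC (<caller>)" label into a fixed-size stack buffer,
// as verify_during_pause() does.
static void make_verify_label(char* buf, size_t len, const char* caller) {
  std::snprintf(buf, len, "During GC (%s)", caller);
}

int main() {
  char buffer[512];
  make_verify_label(buffer, sizeof(buffer), "Remark after");
  std::puts(buffer);  // prints: During GC (Remark after)
  return 0;
}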
1006 
1007 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1008   G1CollectedHeap* _g1h;
1009   G1ConcurrentMark* _cm;
1010 
1011   G1PrintRegionLivenessInfoClosure _cl;
1012 
1013   uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1014 
1015   void update_remset_before_rebuild(HeapRegion * hr) {
1016     G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1017 
1018     size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1019     bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1020     if (selected_for_rebuild) {
1021       _num_regions_selected_for_rebuild++;
1022     }
1023     _cm->update_top_at_rebuild_start(hr);
1024   }
1025 
1026   void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1027     uint const region_idx = hr->hrm_index();
1028     uint num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(marked_words);
1029 
1030     for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1031       HeapRegion* const r = _g1h->region_at(i);
1032       size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1033       assert(words_to_add > 0, "Out of space to distribute before end of humongous object in region %u (starts %u)", i, region_idx);
1034 
1035       log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)", 
1036                              words_to_add, i, r->get_type_str());
1037       r->add_to_marked_bytes(words_to_add * HeapWordSize);
1038       marked_words -= words_to_add;
1039     }
1040     assert(marked_words == 0,
1041            SIZE_FORMAT " words left after distributing space across %u regions",
1042            marked_words, num_regions_in_humongous);
1043   }
1044 
1045   void update_marked_bytes(HeapRegion* hr) {
1046     uint const region_idx = hr->hrm_index();
1047     size_t marked_words = _cm->liveness(region_idx);
1048     // Marking attributes an object's size completely to the starts humongous
1049     // region. We need to distribute this value across the entire set of regions
1050     // the humongous object spans.
1051     if (hr->is_humongous()) {
1052       assert(hr->is_starts_humongous() || marked_words == 0,
1053              "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1054              marked_words, region_idx, hr->get_type_str());
1055 
1056       if (marked_words > 0) {
1057         distribute_marked_bytes(hr, marked_words);
1058       }
1059     } else {
1060       log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1061       hr->add_to_marked_bytes(marked_words * HeapWordSize);
1062     }
1063   }
1064 
1065 public:
1066   G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
1067     _g1h(g1h), _cm(cm), _cl("Post-Marking"), _num_regions_selected_for_rebuild(0) { }
1068 
1069   virtual bool do_heap_region(HeapRegion* r) {
1070     update_remset_before_rebuild(r);
1071     update_marked_bytes(r);
1072     if (log_is_enabled(Trace, gc, liveness)) {
1073       _cl.do_heap_region(r);
1074     }
1075     r->note_end_of_marking();
1076     return false;
1077   }
1078 
1079   uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1080 };
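
A standalone model of the arithmetic in distribute_marked_bytes() above,
under assumed values: kGrainWords stands in for HeapRegion::GrainWords and
the 2.5-region object is made up. Every spanned region receives a full
region's worth of words except the last, which receives the remainder.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Stand-in for HeapRegion::GrainWords (words per region); value is arbitrary.
static const size_t kGrainWords = 1024 * 1024;

// Spread marked_words, which marking recorded entirely on the starts region,
// across the num_regions regions the humongous object spans.
static void distribute(size_t marked_words, size_t num_regions, size_t* out) {
  for (size_t i = 0; i < num_regions; i++) {
    size_t words_to_add = std::min(kGrainWords, marked_words);
    assert(words_to_add > 0 && "ran out of words before the object's end");
    out[i] = words_to_add;
    marked_words -= words_to_add;
  }
  assert(marked_words == 0 && "every live word must land in some region");
}

int main() {
  // A humongous object covering 2.5 regions: the shares come out as 1.0,
  // 1.0 and 0.5 region's worth of words.
  size_t shares[3];
  distribute(2 * kGrainWords + kGrainWords / 2, 3, shares);
  for (size_t i = 0; i < 3; i++) {
    std::printf("region %zu: %zu words\n", i, shares[i]);
  }
  return 0;
}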
1081 
1082 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1083   G1CollectedHeap* _g1h;
1084 public:
1085   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1086 
1087   virtual bool do_heap_region(HeapRegion* r) {
1088     _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1089     return false;
1090   }
1091 };
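
Both rebuild closures follow the HeapRegionClosure protocol: the heap walk
applies do_heap_region() to every region and terminates early only when the
closure returns true, which is why the closures above return false. A toy
model of that contract, with hypothetical names:

#include <cstdio>
#include <vector>

struct Region {
  unsigned index;
};

struct RegionClosure {
  virtual bool do_heap_region(Region* r) = 0;
  virtual ~RegionClosure() {}
};

// Visits every region unless the closure asks to stop by returning true.
static void heap_region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
  for (Region& r : heap) {
    if (cl->do_heap_region(&r)) {
      return;
    }
  }
}

struct CountingClosure : RegionClosure {
  unsigned visited = 0;
  bool do_heap_region(Region*) { visited++; return false; }
};

int main() {
  std::vector<Region> heap = {{0}, {1}, {2}};
  CountingClosure cl;
  heap_region_iterate(heap, &cl);
  std::printf("visited %u regions\n", cl.visited);
  return 0;
}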
1092 
1093 void G1ConcurrentMark::remark() {
1094   assert_at_safepoint_on_vm_thread();
1095 


1112   }
1113 
1114   double mark_work_end = os::elapsedTime();
1115 
1116   bool const mark_finished = !has_overflown();
1117   if (mark_finished) {
1118     weak_refs_work(false /* clear_all_soft_refs */);
1119 
1120     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1121     // We're done with marking.
1122     // This is the end of the marking cycle; we expect all
1123     // threads to have SATB queues with active set to true.
1124     satb_mq_set.set_active_all_threads(false, /* new active value */
1125                                        true /* expected_active */);
1126 
1127     {
1128       GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1129       flush_all_task_caches();
1130     }
1131 
1132     // Install newly created mark bitmap as "prev".
1133     swap_mark_bitmaps();
1134     {
1135       GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1136       G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1137       _g1h->heap_region_iterate(&cl);
1138       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1139                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
1140     }
1141 
1142     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1143 
1144     assert(!restart_for_overflow(), "sanity");
1145     // Completely reset the marking state since marking completed
1146     reset_at_marking_complete();
1147   } else {
1148     // We overflowed.  Restart concurrent marking.
1149     _restart_for_overflow = true;
1150 
1151     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1152 
1153     // Clear the marking state because we will be restarting
1154     // marking due to overflowing the global mark stack.
1155     reset_marking_for_restart();
1156   }
1157 
1158   {
1159     GCTraceTime(Debug, gc, phases)("Report Object Count");
1160     report_object_count(mark_finished);
1161   }
1162 
1163   // Statistics
1164   double now = os::elapsedTime();
1165   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1166   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1167   _remark_times.add((now - start) * 1000.0);
1168 
1169   g1p->record_concurrent_mark_remark_end();
1170 }
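
A small model of the SATB activeness flip performed on the mark_finished
path, with hypothetical types; the two boolean arguments mirror
set_active_all_threads(false /* new active value */, true /* expected_active */):

#include <cassert>
#include <vector>

struct SATBQueue {
  bool active;
};

// Deactivate every thread's SATB queue, asserting each one was active, as
// must hold at the end of a completed marking cycle.
static void set_active_all_threads(std::vector<SATBQueue>& queues,
                                   bool new_active, bool expected_active) {
  for (SATBQueue& q : queues) {
    assert(q.active == expected_active && "queue in unexpected state");
    q.active = new_active;
  }
}

int main() {
  std::vector<SATBQueue> queues(4, SATBQueue{true});  // active during marking
  set_active_all_threads(queues, false, true);
  return 0;
}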
1171 
1172 class G1CleanupTask : public AbstractGangTask {
1173   // Per-region work during the Cleanup pause.
1174   class G1CleanupRegionsClosure : public HeapRegionClosure {
1175     G1CollectedHeap* _g1h;
1176     size_t _freed_bytes;
1177     FreeRegionList* _local_cleanup_list;
1178     uint _old_regions_removed;
1179     uint _humongous_regions_removed;
1180     HRRSCleanupTask* _hrrs_cleanup_task;
1181 
1182   public:
1183     G1CleanupRegionsClosure(G1CollectedHeap* g1,
1184                             FreeRegionList* local_cleanup_list,
1185                             HRRSCleanupTask* hrrs_cleanup_task) :
1186       _g1h(g1),
1187       _freed_bytes(0),
1188       _local_cleanup_list(local_cleanup_list),
1189       _old_regions_removed(0),
1190       _humongous_regions_removed(0),
1191       _hrrs_cleanup_task(hrrs_cleanup_task) { }
1192 
1193     size_t freed_bytes() { return _freed_bytes; }
1194     const uint old_regions_removed() { return _old_regions_removed; }
1195     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1196 
1197     bool do_heap_region(HeapRegion *hr) {


1198       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1199         _freed_bytes += hr->used();
1200         hr->set_containing_set(NULL);
1201         if (hr->is_humongous()) {
1202           _humongous_regions_removed++;
1203           _g1h->free_humongous_region(hr, _local_cleanup_list);
1204         } else {
1205           _old_regions_removed++;
1206           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1207         }
1208         hr->clear_cardtable();
1209         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1210         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1211       } else {
1212         hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1213       }
1214 
1215       return false;
1216     }
1217   };
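
The reclaim condition in do_heap_region() can be read in isolation: a region
is freed only when it is in use, marking found nothing live in it, and it is
neither young nor archive. A compile-time sketch with stand-in fields (not
HotSpot's HeapRegion API):

#include <cstddef>

struct RegionInfo {
  size_t used_bytes;
  size_t max_live_bytes;
  bool   is_young;
  bool   is_archive;
};

constexpr bool is_reclaimable(const RegionInfo& r) {
  return r.used_bytes > 0 && r.max_live_bytes == 0 &&
         !r.is_young && !r.is_archive;
}

// A used region with no live data is reclaimed; an empty or young one is not.
static_assert(is_reclaimable({4096, 0, false, false}), "garbage-only region");
static_assert(!is_reclaimable({0, 0, false, false}), "already free");
static_assert(!is_reclaimable({4096, 0, true, false}), "young regions are kept");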


1272       }
1273     }
1274     // And actually make them available.
1275     _g1h->prepend_to_freelist(&empty_regions_list);
1276   }
1277 }
1278 
1279 void G1ConcurrentMark::cleanup() {
1280   assert_at_safepoint_on_vm_thread();
1281 
1282   // If a full collection has happened, we shouldn't do this.
1283   if (has_aborted()) {
1284     return;
1285   }
1286 
1287   G1Policy* g1p = _g1h->g1_policy();
1288   g1p->record_concurrent_mark_cleanup_start();
1289 
1290   double start = os::elapsedTime();
1291 
1292   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1293 
1294   {
1295     GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
1296     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1297     _g1h->heap_region_iterate(&cl);
1298   }
1299 
1300   if (log_is_enabled(Trace, gc, liveness)) {
1301     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1302     _g1h->heap_region_iterate(&cl);
1303   }
1304 


1305   {
1306     GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
1307     reclaim_empty_regions();
1308   }
1309 
1310   // Cleanup will have freed any regions completely full of garbage.
1311   // Update the soft reference policy with the new heap occupancy.
1312   Universe::update_heap_info_at_gc();
1313 
1314   // Clean out dead classes and update Metaspace sizes.
1315   if (ClassUnloadingWithConcurrentMark) {
1316     GCTraceTime(Debug, gc, phases)("Purge Metaspace");
1317     ClassLoaderDataGraph::purge();
1318   }
1319   MetaspaceGC::compute_new_size();
1320 
1321   // We reclaimed old regions, so we should recalculate the sizes to make
1322   // sure the old gen/space data gets updated.
1323   _g1h->g1mm()->update_sizes();
1324 


1652   if (has_overflown()) {
1653     // We cannot trust g1_is_alive if the marking stack overflowed
1654     return;
1655   }
1656 
1657   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1658 
1659   // Unload Klasses, String, Symbols, Code Cache, etc.
1660   if (ClassUnloadingWithConcurrentMark) {
1661     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1662     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1663     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1664   } else {
1665     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1666     // No need to clean string table and symbol table as they are treated as strong roots when
1667     // class unloading is disabled.
1668     _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1669   }
1670 }
1671 
1672 // When sampling object counts, we have already swapped the mark bitmaps, so we
1673 // need to use the prev bitmap to determine liveness.
1674 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1675   G1CollectedHeap* _g1;
1676  public:
1677   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
1678 
1679   bool do_object_b(oop obj) {
1680     HeapWord* addr = (HeapWord*)obj;
1681     return addr != NULL &&
1682            (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_dead(obj));
1683   }
1684 };
1685 
1686 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1687   // Depending on whether marking completed, liveness needs to be determined
1688   // using either the prev or next bitmap.
1689   if (mark_completed) {
1690     G1ObjectCountIsAliveClosure is_alive(_g1h);
1691     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1692   } else {
1693     G1CMIsAliveClosure is_alive(_g1h);
1694     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1695   }
1696 }
1697 
1698 
1699 void G1ConcurrentMark::swap_mark_bitmaps() {
1700   G1CMBitMap* temp = _prev_mark_bitmap;
1701   _prev_mark_bitmap = _next_mark_bitmap;
1702   _next_mark_bitmap = temp;
1703   _g1h->collector_state()->set_clearing_next_bitmap(true);
1704 }
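
A toy model (our names, not HotSpot's) of what swap_mark_bitmaps() achieves:
the bitmap built during marking becomes the liveness source for subsequent
pauses, and the recycled bitmap is flagged for clearing, matching the
set_clearing_next_bitmap(true) call above:

#include <utility>
#include <vector>

struct MarkBitmaps {
  std::vector<bool> prev;   // results of the last completed marking
  std::vector<bool> next;   // bitmap being built by the current marking
  bool clearing_next = false;

  void swap_at_marking_complete() {
    std::swap(prev, next);
    clearing_next = true;   // stale marks must be wiped before the next cycle
  }
};

int main() {
  MarkBitmaps bm;
  bm.next.assign(8, true);  // everything in this toy heap was marked live
  bm.swap_at_marking_complete();
  return (bm.prev.size() == 8 && bm.clearing_next) ? 0 : 1;
}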
1705 
1706 // Closure for marking entries in SATB buffers.
1707 class G1CMSATBBufferClosure : public SATBBufferClosure {
1708 private:
1709   G1CMTask* _task;
1710   G1CollectedHeap* _g1h;
1711 
1712   // This is very similar to G1CMTask::deal_with_reference, but with
1713   // more relaxed requirements for the argument, so this must be more
1714   // circumspect about treating the argument as an object.
1715   void do_entry(void* entry) const {
1716     _task->increment_refs_reached();
1717     oop const obj = static_cast<oop>(entry);