< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Print this page
rev 49536 : imported patch 8200305-gc,liveness-output
rev 49537 : imported patch 8200385-prev-bitmap-marks-left
rev 49538 : imported patch 8200385-stefanj-review
rev 49539 : imported patch 8178105-switch-at-remark
rev 49540 : imported patch 8178105-stefanj-review
rev 49541 : imported patch 8178105-stefanj-review2


 993 
 994   verifier->verify_region_sets_optional();
 995 
 996   if (VerifyDuringGC) {
 997     GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
 998 
 999     size_t const BufLen = 512;
1000     char buffer[BufLen];
1001 
1002     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1003     verifier->verify(type, vo, buffer);
1004   }
1005 
1006   verifier->check_bitmaps(caller);
1007 }
1008 
1009 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
       // Closure applied to every heap region during the Remark pause, before
       // the remembered set rebuild starts: asks the remset tracking policy
       // which regions need their remembered sets rebuilt (counting them) and
       // records each region's current top as the rebuild limit.
1010   G1CollectedHeap* _g1h;
1011   G1ConcurrentMark* _cm;
1012 


1013   uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1014 
       // Updates remembered set tracking state for hr based on its live bytes
       // (from the marking liveness counts), counts it if selected for
       // rebuild, and snapshots the region's top for the upcoming rebuild.
1015   void update_remset_before_rebuild(HeapRegion * hr) {
1016     G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1017 
1018     size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1019     bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1020     if (selected_for_rebuild) {
1021       _num_regions_selected_for_rebuild++;
1022     }
1023     _cm->update_top_at_rebuild_start(hr);
1024   }
1025 






































1026 public:
1027   G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
1028     _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0) { }
1029 
1030   virtual bool do_heap_region(HeapRegion* r) {
1031     update_remset_before_rebuild(r);




         // Returning false continues the iteration over all regions.
1032     return false;
1033   }
1034 
       // Number of regions the tracking policy selected for remset rebuild.
1035   uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1036 };
1037 
1038 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
       // Closure applied to every heap region once the remembered set rebuild
       // has finished: lets the tracking policy finalize each region's
       // remembered set state.
1039   G1CollectedHeap* _g1h;
1040 public:
1041   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1042 
1043   virtual bool do_heap_region(HeapRegion* r) {
1044     _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
         // Returning false continues the iteration over all regions.
1045     return false;
1046   }
1047 };
1048 
1049 void G1ConcurrentMark::remark() {
1050   assert_at_safepoint_on_vm_thread();
1051 


1068   }
1069 
1070   double mark_work_end = os::elapsedTime();
1071 
1072   bool const mark_finished = !has_overflown();
1073   if (mark_finished) {
1074     weak_refs_work(false /* clear_all_soft_refs */);
1075 
1076     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1077     // We're done with marking.
1078     // This is the end of the marking cycle, we expect all
1079     // threads to have SATB queues with active set to true.
1080     satb_mq_set.set_active_all_threads(false, /* new active value */
1081                                        true /* expected_active */);
1082 
1083     {
1084       GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1085       flush_all_task_caches();
1086     }
1087 


1088     {
1089       GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1090       G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1091       _g1h->heap_region_iterate(&cl);
1092       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1093                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
1094     }
1095 
1096     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");
1097 
1098     assert(!restart_for_overflow(), "sanity");
1099     // Completely reset the marking state since marking completed
1100     reset_at_marking_complete();
1101   } else {
1102     // We overflowed.  Restart concurrent marking.
1103     _restart_for_overflow = true;
1104 
1105     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1106 
1107     // Clear the marking state because we will be restarting
1108     // marking due to overflowing the global mark stack.
1109     reset_marking_for_restart();
1110   }
1111 
1112   {
1113     GCTraceTime(Debug, gc, phases)("Report Object Count");
1114     report_object_count();
1115   }
1116 
1117   // Statistics
1118   double now = os::elapsedTime();
1119   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1120   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1121   _remark_times.add((now - start) * 1000.0);
1122 
1123   g1p->record_concurrent_mark_remark_end();
1124 }
1125 
1126 class G1CleanupTask : public AbstractGangTask {
1127   // Per-region work during the Cleanup pause.
1128   class G1CleanupRegionsClosure : public HeapRegionClosure {
           // Notes the end of marking on each region, frees regions that
           // marking found completely empty, and performs remembered set
           // cleanup work on the rest. Freed regions are gathered on
           // _local_cleanup_list for later bulk insertion into the free list.
1129     G1CollectedHeap* _g1h;
1130     size_t _freed_bytes;
1131     FreeRegionList* _local_cleanup_list;
1132     uint _old_regions_removed;
1133     uint _humongous_regions_removed;
1134     HRRSCleanupTask* _hrrs_cleanup_task;
1135 
1136   public:
1137     G1CleanupRegionsClosure(G1CollectedHeap* g1,
1138                             FreeRegionList* local_cleanup_list,
1139                             HRRSCleanupTask* hrrs_cleanup_task) :
1140       _g1h(g1),
1141       _freed_bytes(0),
1142       _local_cleanup_list(local_cleanup_list),
1143       _old_regions_removed(0),
1144       _humongous_regions_removed(0),
1145       _hrrs_cleanup_task(hrrs_cleanup_task) { }
1146 
1147     size_t freed_bytes() { return _freed_bytes; }
         // NOTE(review): 'const' here qualifies the returned value, not the
         // method; these accessors look like they were meant to be
         // 'uint old_regions_removed() const' -- confirm before changing.
1148     const uint old_regions_removed() { return _old_regions_removed; }
1149     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1150 
1151     bool do_heap_region(HeapRegion *hr) {
1152       hr->note_end_of_marking();
1153 
           // A region with no live bytes after marking is garbage; skip young
           // and archive regions, which are never reclaimed here.
1154       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1155         _freed_bytes += hr->used();
1156         hr->set_containing_set(NULL);
1157         if (hr->is_humongous()) {
1158           _humongous_regions_removed++;
1159           _g1h->free_humongous_region(hr, _local_cleanup_list);
1160         } else {
1161           _old_regions_removed++;
1162           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1163         }
1164         hr->clear_cardtable();
1165         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1166         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1167       } else {
1168         hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1169       }
1170 
1171       return false;
1172     }
1173   };


1228       }
1229     }
1230     // And actually make them available.
1231     _g1h->prepend_to_freelist(&empty_regions_list);
1232   }
1233 }
1234 
1235 void G1ConcurrentMark::cleanup() {
1236   assert_at_safepoint_on_vm_thread();
1237 
1238   // If a full collection has happened, we shouldn't do this.
1239   if (has_aborted()) {
1240     return;
1241   }
1242 
1243   G1Policy* g1p = _g1h->g1_policy();
1244   g1p->record_concurrent_mark_cleanup_start();
1245 
1246   double start = os::elapsedTime();
1247 
1248   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");
1249 
1250   {
1251     GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
1252     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1253     _g1h->heap_region_iterate(&cl);
1254   }
1255 
1256   if (log_is_enabled(Trace, gc, liveness)) {
1257     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1258     _g1h->heap_region_iterate(&cl);
1259   }
1260 
1261   // Install newly created mark bitmap as "prev".
1262   swap_mark_bitmaps();
1263   {
1264     GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
1265     reclaim_empty_regions();
1266   }
1267 
1268   // Cleanup will have freed any regions completely full of garbage.
1269   // Update the soft reference policy with the new heap occupancy.
1270   Universe::update_heap_info_at_gc();
1271 
1272   // Clean out dead classes and update Metaspace sizes.
1273   if (ClassUnloadingWithConcurrentMark) {
1274     GCTraceTime(Debug, gc, phases)("Purge Metaspace");
1275     ClassLoaderDataGraph::purge();
1276   }
1277   MetaspaceGC::compute_new_size();
1278 
1279   // We reclaimed old regions so we should calculate the sizes to make
1280   // sure we update the old gen/space data.
1281   _g1h->g1mm()->update_sizes();
1282 


1610   if (has_overflown()) {
1611     // We can not trust g1_is_alive if the marking stack overflowed
1612     return;
1613   }
1614 
1615   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1616 
1617   // Unload Klasses, String, Symbols, Code Cache, etc.
1618   if (ClassUnloadingWithConcurrentMark) {
1619     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1620     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1621     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1622   } else {
1623     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1624     // No need to clean string table and symbol table as they are treated as strong roots when
1625     // class unloading is disabled.
1626     _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1627   }
1628 }
1629 
1630 void G1ConcurrentMark::report_object_count() {
       // Reports the object count after marking to the GC tracer, using the
       // concurrent-mark is-alive closure to decide liveness.




















1631   G1CMIsAliveClosure is_alive(_g1h);
1632   _gc_tracer_cm->report_object_count_after_gc(&is_alive);

1633 }

1634 
1635 void G1ConcurrentMark::swap_mark_bitmaps() {
       // Installs the just-completed next mark bitmap as "prev" by swapping
       // the two bitmap pointers, and flags the collector state so the (now)
       // next bitmap gets cleared before the following marking cycle.
1636   G1CMBitMap* temp = _prev_mark_bitmap;
1637   _prev_mark_bitmap = _next_mark_bitmap;
1638   _next_mark_bitmap = temp;
1639   _g1h->collector_state()->set_clearing_next_bitmap(true);
1640 }
1641 
1642 // Closure for marking entries in SATB buffers.
1643 class G1CMSATBBufferClosure : public SATBBufferClosure {
1644 private:
1645   G1CMTask* _task;
1646   G1CollectedHeap* _g1h;
1647 
1648   // This is very similar to G1CMTask::deal_with_reference, but with
1649   // more relaxed requirements for the argument, so this must be more
1650   // circumspect about treating the argument as an object.
1651   void do_entry(void* entry) const {
1652     _task->increment_refs_reached();
1653     oop const obj = static_cast<oop>(entry);




 993 
 994   verifier->verify_region_sets_optional();
 995 
 996   if (VerifyDuringGC) {
 997     GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
 998 
 999     size_t const BufLen = 512;
1000     char buffer[BufLen];
1001 
1002     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1003     verifier->verify(type, vo, buffer);
1004   }
1005 
1006   verifier->check_bitmaps(caller);
1007 }
1008 
1009 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
       // Closure applied to every heap region during the Remark pause, before
       // the remembered set rebuild starts. For each region it:
       //  - asks the remset tracking policy whether the region's remembered
       //    set must be rebuilt (counting the selected regions),
       //  - transfers the liveness figures gathered during marking into the
       //    region's marked-bytes accounting,
       //  - optionally prints per-region liveness (gc+liveness=trace),
       //  - and lets the region note the end of marking.
1010   G1CollectedHeap* _g1h;
1011   G1ConcurrentMark* _cm;
1012 
       // Liveness printer used when gc+liveness=trace logging is enabled.
1013   G1PrintRegionLivenessInfoClosure _cl;
1014 
1015   uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1016 
       // Updates remembered set tracking state for hr based on its live bytes
       // (from the marking liveness counts), counts it if selected for
       // rebuild, and snapshots the region's top for the upcoming rebuild.
1017   void update_remset_before_rebuild(HeapRegion * hr) {
1018     G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1019 
1020     size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1021     bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1022     if (selected_for_rebuild) {
1023       _num_regions_selected_for_rebuild++;
1024     }
1025     _cm->update_top_at_rebuild_start(hr);
1026   }
1027 
       // Spreads marked_words -- the live size of a humongous object, which
       // marking attributes entirely to the starts region -- across all the
       // regions the object spans: each region receives at most GrainWords,
       // with the final region taking the remainder.
1028   void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1029     uint const region_idx = hr->hrm_index();
1030     uint num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(marked_words);
1031 
1032     for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1033       HeapRegion* const r = _g1h->region_at(i);
1034       size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1035       assert(words_to_add > 0, "Out of space to distribute before end of humongous object in region %u (starts %u)", i, region_idx);
1036 
1037       log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)", 
1038                              words_to_add, i, r->get_type_str());
1039       r->add_to_marked_bytes(words_to_add * HeapWordSize);
1040       marked_words -= words_to_add;
1041     }
         // Everything must have been handed out by the last spanned region.
1042     assert(marked_words == 0,
1043            SIZE_FORMAT " words left after distributing space across %u regions",
1044            marked_words, num_regions_in_humongous);
1045   }
1046 
       // Transfers the marking liveness count for hr into the region's
       // marked-bytes bookkeeping; humongous objects are distributed across
       // all the regions they span.
1047   void update_marked_bytes(HeapRegion* hr) {
1048     uint const region_idx = hr->hrm_index();
1049     size_t marked_words = _cm->liveness(region_idx);
1050     // The marking attributes the object's size completely to the humongous starts
1051     // region. We need to distribute this value across the entire set of regions a
1052     // humongous object spans.
1053     if (hr->is_humongous()) {
1054       assert(hr->is_starts_humongous() || marked_words == 0,
1055              "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1056              marked_words, region_idx, hr->get_type_str());
1057 
1058       if (marked_words > 0) {
1059         distribute_marked_bytes(hr, marked_words);
1060       }
1061     } else {
1062       log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1063       hr->add_to_marked_bytes(marked_words * HeapWordSize);
1064     }
1065   }
1066 
1067 public:
1068   G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
1069     _g1h(g1h), _cm(cm), _cl("Post-Marking"), _num_regions_selected_for_rebuild(0) { }
1070 
1071   virtual bool do_heap_region(HeapRegion* r) {
1072     update_remset_before_rebuild(r);
1073     update_marked_bytes(r);
1074     if (log_is_enabled(Trace, gc, liveness)) {
1075       _cl.do_heap_region(r);
1076     }
1077     r->note_end_of_marking();
         // Returning false continues the iteration over all regions.
1078     return false;
1079   }
1080 
       // Number of regions the tracking policy selected for remset rebuild.
1081   uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1082 };
1083 
1084 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
       // Closure applied to every heap region once the remembered set rebuild
       // has finished: lets the tracking policy finalize each region's
       // remembered set state.
1085   G1CollectedHeap* _g1h;
1086 public:
1087   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1088 
1089   virtual bool do_heap_region(HeapRegion* r) {
1090     _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
         // Returning false continues the iteration over all regions.
1091     return false;
1092   }
1093 };
1094 
1095 void G1ConcurrentMark::remark() {
1096   assert_at_safepoint_on_vm_thread();
1097 


1114   }
1115 
1116   double mark_work_end = os::elapsedTime();
1117 
1118   bool const mark_finished = !has_overflown();
1119   if (mark_finished) {
1120     weak_refs_work(false /* clear_all_soft_refs */);
1121 
1122     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1123     // We're done with marking.
1124     // This is the end of the marking cycle, we expect all
1125     // threads to have SATB queues with active set to true.
1126     satb_mq_set.set_active_all_threads(false, /* new active value */
1127                                        true /* expected_active */);
1128 
1129     {
1130       GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1131       flush_all_task_caches();
1132     }
1133 
1134     // Install newly created mark bitmap as "prev".
1135     swap_mark_bitmaps();
1136     {
1137       GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1138       G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1139       _g1h->heap_region_iterate(&cl);
1140       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1141                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
1142     }
1143 
1144     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1145 
1146     assert(!restart_for_overflow(), "sanity");
1147     // Completely reset the marking state since marking completed
1148     reset_at_marking_complete();
1149   } else {
1150     // We overflowed.  Restart concurrent marking.
1151     _restart_for_overflow = true;
1152 
1153     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1154 
1155     // Clear the marking state because we will be restarting
1156     // marking due to overflowing the global mark stack.
1157     reset_marking_for_restart();
1158   }
1159 
1160   {
1161     GCTraceTime(Debug, gc, phases)("Report Object Count");
1162     report_object_count(mark_finished);
1163   }
1164 
1165   // Statistics
1166   double now = os::elapsedTime();
1167   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1168   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1169   _remark_times.add((now - start) * 1000.0);
1170 
1171   g1p->record_concurrent_mark_remark_end();
1172 }
1173 
1174 class G1CleanupTask : public AbstractGangTask {
1175   // Per-region work during the Cleanup pause.
1176   class G1CleanupRegionsClosure : public HeapRegionClosure {
           // Frees regions that marking found completely empty and performs
           // remembered set cleanup work on the rest. Freed regions are
           // gathered on _local_cleanup_list for later bulk insertion into
           // the free list; per-type counters track what was removed.
1177     G1CollectedHeap* _g1h;
1178     size_t _freed_bytes;
1179     FreeRegionList* _local_cleanup_list;
1180     uint _old_regions_removed;
1181     uint _humongous_regions_removed;
1182     HRRSCleanupTask* _hrrs_cleanup_task;
1183 
1184   public:
1185     G1CleanupRegionsClosure(G1CollectedHeap* g1,
1186                             FreeRegionList* local_cleanup_list,
1187                             HRRSCleanupTask* hrrs_cleanup_task) :
1188       _g1h(g1),
1189       _freed_bytes(0),
1190       _local_cleanup_list(local_cleanup_list),
1191       _old_regions_removed(0),
1192       _humongous_regions_removed(0),
1193       _hrrs_cleanup_task(hrrs_cleanup_task) { }
1194 
1195     size_t freed_bytes() { return _freed_bytes; }
         // NOTE(review): 'const' here qualifies the returned value, not the
         // method; these accessors look like they were meant to be
         // 'uint old_regions_removed() const' -- confirm before changing.
1196     const uint old_regions_removed() { return _old_regions_removed; }
1197     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1198 
1199     bool do_heap_region(HeapRegion *hr) {


           // A region with no live bytes after marking is garbage; skip young
           // and archive regions, which are never reclaimed here.
1200       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1201         _freed_bytes += hr->used();
1202         hr->set_containing_set(NULL);
1203         if (hr->is_humongous()) {
1204           _humongous_regions_removed++;
1205           _g1h->free_humongous_region(hr, _local_cleanup_list);
1206         } else {
1207           _old_regions_removed++;
1208           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1209         }
1210         hr->clear_cardtable();
1211         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1212         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1213       } else {
1214         hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1215       }
1216 
1217       return false;
1218     }
1219   };


1274       }
1275     }
1276     // And actually make them available.
1277     _g1h->prepend_to_freelist(&empty_regions_list);
1278   }
1279 }
1280 
1281 void G1ConcurrentMark::cleanup() {
1282   assert_at_safepoint_on_vm_thread();
1283 
1284   // If a full collection has happened, we shouldn't do this.
1285   if (has_aborted()) {
1286     return;
1287   }
1288 
1289   G1Policy* g1p = _g1h->g1_policy();
1290   g1p->record_concurrent_mark_cleanup_start();
1291 
1292   double start = os::elapsedTime();
1293 
1294   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1295 
1296   {
1297     GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
1298     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1299     _g1h->heap_region_iterate(&cl);
1300   }
1301 
1302   if (log_is_enabled(Trace, gc, liveness)) {
1303     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1304     _g1h->heap_region_iterate(&cl);
1305   }
1306 


1307   {
1308     GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
1309     reclaim_empty_regions();
1310   }
1311 
1312   // Cleanup will have freed any regions completely full of garbage.
1313   // Update the soft reference policy with the new heap occupancy.
1314   Universe::update_heap_info_at_gc();
1315 
1316   // Clean out dead classes and update Metaspace sizes.
1317   if (ClassUnloadingWithConcurrentMark) {
1318     GCTraceTime(Debug, gc, phases)("Purge Metaspace");
1319     ClassLoaderDataGraph::purge();
1320   }
1321   MetaspaceGC::compute_new_size();
1322 
1323   // We reclaimed old regions so we should calculate the sizes to make
1324   // sure we update the old gen/space data.
1325   _g1h->g1mm()->update_sizes();
1326 


1654   if (has_overflown()) {
1655     // We can not trust g1_is_alive if the marking stack overflowed
1656     return;
1657   }
1658 
1659   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1660 
1661   // Unload Klasses, String, Symbols, Code Cache, etc.
1662   if (ClassUnloadingWithConcurrentMark) {
1663     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1664     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1665     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1666   } else {
1667     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1668     // No need to clean string table and symbol table as they are treated as strong roots when
1669     // class unloading is disabled.
1670     _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1671   }
1672 }
1673 
1674 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1675 // the prev bitmap when determining liveness.
1676 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1677   G1CollectedHeap* _g1;
1678  public:
1679   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
1680 
       // An object is considered live if it lies outside the G1 reserved
       // heap, or lies inside it and is not dead according to the (already
       // swapped) prev bitmap.
1681   bool do_object_b(oop obj) {
1682     HeapWord* addr = (HeapWord*)obj;
1683     return addr != NULL &&
1684            (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_dead(obj));
1685   }
1686 };
1687 
1688 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1689   // Depending on whether marking completed, liveness needs to be determined
1690   // using either the next or the prev bitmap.
1691   if (mark_completed) {
         // Marking finished: the bitmaps have already been swapped, so the
         // prev bitmap holds the final marks.
1692     G1ObjectCountIsAliveClosure is_alive(_g1h);
1693     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1694   } else {
         // Restart-for-overflow: no swap happened, the next bitmap still
         // holds the (partial) marks.
1695     G1CMIsAliveClosure is_alive(_g1h);
1696     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1697   }
1698 }
1699 
1700 
1701 void G1ConcurrentMark::swap_mark_bitmaps() {
       // Installs the just-completed next mark bitmap as "prev" by swapping
       // the two bitmap pointers, and flags the collector state so the (now)
       // next bitmap gets cleared before the following marking cycle.
1702   G1CMBitMap* temp = _prev_mark_bitmap;
1703   _prev_mark_bitmap = _next_mark_bitmap;
1704   _next_mark_bitmap = temp;
1705   _g1h->collector_state()->set_clearing_next_bitmap(true);
1706 }
1707 
1708 // Closure for marking entries in SATB buffers.
1709 class G1CMSATBBufferClosure : public SATBBufferClosure {
1710 private:
1711   G1CMTask* _task;
1712   G1CollectedHeap* _g1h;
1713 
1714   // This is very similar to G1CMTask::deal_with_reference, but with
1715   // more relaxed requirements for the argument, so this must be more
1716   // circumspect about treating the argument as an object.
1717   void do_entry(void* entry) const {
1718     _task->increment_refs_reached();
1719     oop const obj = static_cast<oop>(entry);


< prev index next >