< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Print this page
rev 49511 : imported patch 8200234-g1concurrentmark-refactorings
rev 49512 : imported patch 8200234-stefanj-review
rev 49515 : 8200255: Remove G1CMTask::_concurrent
Reviewed-by: sangheki, sjohanss
rev 49516 : 8200074: Remove G1ConcurrentMark::_concurrent_marking_in_progress
Reviewed-by: sjohanss, sangheki
rev 49517 : imported patch 8200305-gc,liveness-output
rev 49518 : imported patch 8200385-prev-bitmap-marks-left
rev 49519 : imported patch 8200385-stefanj-review
rev 49520 : imported patch 8178105-switch-at-remark
rev 49521 : imported patch 8178105-stefanj-review
rev 49522 : imported patch 8178105-stefanj-review2
rev 49523 : imported patch 8154528-reclaim-at-remark
rev 49524 : [mq]: 8154528-stefanj-review


1121     // We're done with marking.
1122     // This is the end of the marking cycle, we expect all
1123     // threads to have SATB queues with active set to true.
1124     satb_mq_set.set_active_all_threads(false, /* new active value */
1125                                        true /* expected_active */);
1126 
1127     {
1128       GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1129       flush_all_task_caches();
1130     }
1131 
1132     // Install newly created mark bitmap as "prev".
1133     swap_mark_bitmaps();
1134     {
1135       GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1136       G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1137       _g1h->heap_region_iterate(&cl);
1138       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1139                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
1140     }












1141 
1142     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1143 
1144     assert(!restart_for_overflow(), "sanity");
1145     // Completely reset the marking state since marking completed
1146     reset_at_marking_complete();
1147   } else {
1148     // We overflowed.  Restart concurrent marking.
1149     _restart_for_overflow = true;
1150 
1151     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1152 
1153     // Clear the marking state because we will be restarting
1154     // marking due to overflowing the global mark stack.
1155     reset_marking_for_restart();
1156   }
1157 
1158   {
1159     GCTraceTime(Debug, gc, phases)("Report Object Count");
1160     report_object_count(mark_finished);
1161   }
1162 
1163   // Statistics
1164   double now = os::elapsedTime();
1165   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1166   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1167   _remark_times.add((now - start) * 1000.0);
1168 
1169   g1p->record_concurrent_mark_remark_end();
1170 }
1171 
1172 class G1CleanupTask : public AbstractGangTask {
1173   // Per-region work during the Cleanup pause.
1174   class G1CleanupRegionsClosure : public HeapRegionClosure {
1175     G1CollectedHeap* _g1h;
1176     size_t _freed_bytes;
1177     FreeRegionList* _local_cleanup_list;
1178     uint _old_regions_removed;
1179     uint _humongous_regions_removed;
1180     HRRSCleanupTask* _hrrs_cleanup_task;
1181 
1182   public:
1183     G1CleanupRegionsClosure(G1CollectedHeap* g1,
1184                             FreeRegionList* local_cleanup_list,
1185                             HRRSCleanupTask* hrrs_cleanup_task) :
1186       _g1h(g1),
1187       _freed_bytes(0),
1188       _local_cleanup_list(local_cleanup_list),
1189       _old_regions_removed(0),
1190       _humongous_regions_removed(0),
1191       _hrrs_cleanup_task(hrrs_cleanup_task) { }
1192 
1193     size_t freed_bytes() { return _freed_bytes; }
1194     const uint old_regions_removed() { return _old_regions_removed; }
1195     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1196 
1197     bool do_heap_region(HeapRegion *hr) {
1198       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1199         _freed_bytes += hr->used();
1200         hr->set_containing_set(NULL);
1201         if (hr->is_humongous()) {
1202           _humongous_regions_removed++;
1203           _g1h->free_humongous_region(hr, _local_cleanup_list);
1204         } else {
1205           _old_regions_removed++;
1206           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1207         }
1208         hr->clear_cardtable();
1209         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1210         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1211       } else {
1212         hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1213       }
1214 
1215       return false;
1216     }
1217   };
1218 
1219   G1CollectedHeap* _g1h;
1220   FreeRegionList* _cleanup_list;
1221   HeapRegionClaimer _hrclaimer;
1222 
1223 public:
1224   G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1225     AbstractGangTask("G1 Cleanup"),
1226     _g1h(g1h),
1227     _cleanup_list(cleanup_list),
1228     _hrclaimer(n_workers) {
1229 
1230     HeapRegionRemSet::reset_for_cleanup_tasks();
1231   }
1232 
1233   void work(uint worker_id) {
1234     FreeRegionList local_cleanup_list("Local Cleanup List");
1235     HRRSCleanupTask hrrs_cleanup_task;
1236     G1CleanupRegionsClosure cl(_g1h,
1237                                &local_cleanup_list,
1238                                &hrrs_cleanup_task);
1239     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1240     assert(cl.is_complete(), "Shouldn't have aborted!");
1241 
1242     // Now update the old/humongous region sets
1243     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1244     {
1245       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1246       _g1h->decrement_summary_bytes(cl.freed_bytes());
1247 
1248       _cleanup_list->add_ordered(&local_cleanup_list);
1249       assert(local_cleanup_list.is_empty(), "post-condition");
1250 
1251       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1252     }
1253   }
1254 };
1255 
1256 void G1ConcurrentMark::reclaim_empty_regions() {
1257   WorkGang* workers = _g1h->workers();
1258   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1259 
1260   G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
1261   workers->run_task(&cl);
1262 
1263   if (!empty_regions_list.is_empty()) {
1264     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1265     // Now print the empty regions list.
1266     G1HRPrinter* hrp = _g1h->hr_printer();
1267     if (hrp->is_active()) {
1268       FreeRegionListIterator iter(&empty_regions_list);
1269       while (iter.more_available()) {
1270         HeapRegion* hr = iter.get_next();
1271         hrp->cleanup(hr);
1272       }
1273     }
1274     // And actually make them available.
1275     _g1h->prepend_to_freelist(&empty_regions_list);
1276   }
1277 }
1278 












// The Cleanup pause: runs on the VM thread at a safepoint after concurrent
// marking (and remembered-set rebuild) has finished. Updates remembered-set
// tracking, reclaims completely empty regions, purges dead class data, and
// records timing/policy bookkeeping. No-op if marking was aborted by a full GC.
void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    // Finalize remembered-set tracking state now that the rebuild is complete.
    GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  // Optional per-region liveness dump; guarded so the iteration only runs
  // when the trace level is actually enabled.
  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  {
    GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
    reclaim_empty_regions();
  }

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // Clean out dead classes and update Metaspace sizes.
  // Note: purge() must run before compute_new_size() so the new size reflects
  // the space just released.
  if (ClassUnloadingWithConcurrentMark) {
    GCTraceTime(Debug, gc, phases)("Purge Metaspace");
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
  }
}
1341 
1342 // Supporting Object and Oop closures for reference discovery
1343 // and processing during marking




1121     // We're done with marking.
1122     // This is the end of the marking cycle, we expect all
1123     // threads to have SATB queues with active set to true.
1124     satb_mq_set.set_active_all_threads(false, /* new active value */
1125                                        true /* expected_active */);
1126 
1127     {
1128       GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1129       flush_all_task_caches();
1130     }
1131 
1132     // Install newly created mark bitmap as "prev".
1133     swap_mark_bitmaps();
1134     {
1135       GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1136       G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1137       _g1h->heap_region_iterate(&cl);
1138       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1139                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
1140     }
1141     {
1142       GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
1143       reclaim_empty_regions();
1144     }
1145 
1146     // Clean out dead classes
1147     if (ClassUnloadingWithConcurrentMark) {
1148       GCTraceTime(Debug, gc, phases)("Purge Metaspace");
1149       ClassLoaderDataGraph::purge();
1150     }
1151 
1152     compute_new_sizes();
1153 
1154     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1155 
1156     assert(!restart_for_overflow(), "sanity");
1157     // Completely reset the marking state since marking completed
1158     reset_at_marking_complete();
1159   } else {
1160     // We overflowed.  Restart concurrent marking.
1161     _restart_for_overflow = true;
1162 
1163     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1164 
1165     // Clear the marking state because we will be restarting
1166     // marking due to overflowing the global mark stack.
1167     reset_marking_for_restart();
1168   }
1169 
1170   {
1171     GCTraceTime(Debug, gc, phases)("Report Object Count");
1172     report_object_count(mark_finished);
1173   }
1174 
1175   // Statistics
1176   double now = os::elapsedTime();
1177   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1178   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1179   _remark_times.add((now - start) * 1000.0);
1180 
1181   g1p->record_concurrent_mark_remark_end();
1182 }
1183 
1184 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1185   // Per-region work during the Cleanup pause.
1186   class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1187     G1CollectedHeap* _g1h;
1188     size_t _freed_bytes;
1189     FreeRegionList* _local_cleanup_list;
1190     uint _old_regions_removed;
1191     uint _humongous_regions_removed;
1192     HRRSCleanupTask* _hrrs_cleanup_task;
1193 
1194   public:
1195     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1,
1196                                  FreeRegionList* local_cleanup_list,
1197                                  HRRSCleanupTask* hrrs_cleanup_task) :
1198       _g1h(g1),
1199       _freed_bytes(0),
1200       _local_cleanup_list(local_cleanup_list),
1201       _old_regions_removed(0),
1202       _humongous_regions_removed(0),
1203       _hrrs_cleanup_task(hrrs_cleanup_task) { }
1204 
1205     size_t freed_bytes() { return _freed_bytes; }
1206     const uint old_regions_removed() { return _old_regions_removed; }
1207     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1208 
1209     bool do_heap_region(HeapRegion *hr) {
1210       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1211         _freed_bytes += hr->used();
1212         hr->set_containing_set(NULL);
1213         if (hr->is_humongous()) {
1214           _humongous_regions_removed++;
1215           _g1h->free_humongous_region(hr, _local_cleanup_list);
1216         } else {
1217           _old_regions_removed++;
1218           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1219         }
1220         hr->clear_cardtable();
1221         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1222         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1223       } else {
1224         hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1225       }
1226 
1227       return false;
1228     }
1229   };
1230 
1231   G1CollectedHeap* _g1h;
1232   FreeRegionList* _cleanup_list;
1233   HeapRegionClaimer _hrclaimer;
1234 
1235 public:
1236   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1237     AbstractGangTask("G1 Cleanup"),
1238     _g1h(g1h),
1239     _cleanup_list(cleanup_list),
1240     _hrclaimer(n_workers) {
1241 
1242     HeapRegionRemSet::reset_for_cleanup_tasks();
1243   }
1244 
1245   void work(uint worker_id) {
1246     FreeRegionList local_cleanup_list("Local Cleanup List");
1247     HRRSCleanupTask hrrs_cleanup_task;
1248     G1ReclaimEmptyRegionsClosure cl(_g1h,
1249                                     &local_cleanup_list,
1250                                     &hrrs_cleanup_task);
1251     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1252     assert(cl.is_complete(), "Shouldn't have aborted!");
1253 
1254     // Now update the old/humongous region sets
1255     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1256     {
1257       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1258       _g1h->decrement_summary_bytes(cl.freed_bytes());
1259 
1260       _cleanup_list->add_ordered(&local_cleanup_list);
1261       assert(local_cleanup_list.is_empty(), "post-condition");
1262 
1263       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1264     }
1265   }
1266 };
1267 
1268 void G1ConcurrentMark::reclaim_empty_regions() {
1269   WorkGang* workers = _g1h->workers();
1270   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1271 
1272   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1273   workers->run_task(&cl);
1274 
1275   if (!empty_regions_list.is_empty()) {
1276     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1277     // Now print the empty regions list.
1278     G1HRPrinter* hrp = _g1h->hr_printer();
1279     if (hrp->is_active()) {
1280       FreeRegionListIterator iter(&empty_regions_list);
1281       while (iter.more_available()) {
1282         HeapRegion* hr = iter.get_next();
1283         hrp->cleanup(hr);
1284       }
1285     }
1286     // And actually make them available.
1287     _g1h->prepend_to_freelist(&empty_regions_list);
1288   }
1289 }
1290 
// Recompute heap- and metaspace-related sizing after regions have been
// reclaimed at the end of marking. The call order matters: metaspace sizing
// first, then heap occupancy info, then the old-gen monitoring data.
void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}
1302 
1303 void G1ConcurrentMark::cleanup() {
1304   assert_at_safepoint_on_vm_thread();
1305 
1306   // If a full collection has happened, we shouldn't do this.
1307   if (has_aborted()) {
1308     return;
1309   }
1310 
1311   G1Policy* g1p = _g1h->g1_policy();
1312   g1p->record_concurrent_mark_cleanup_start();
1313 
1314   double start = os::elapsedTime();
1315 
1316   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1317 
1318   {
1319     GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
1320     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1321     _g1h->heap_region_iterate(&cl);
1322   }
1323 
1324   if (log_is_enabled(Trace, gc, liveness)) {
1325     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1326     _g1h->heap_region_iterate(&cl);
1327   }




















1328 
1329   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1330 
1331   // We need to make this be a "collection" so any collection pause that
1332   // races with it goes around and waits for Cleanup to finish.
1333   _g1h->increment_total_collections();
1334 
1335   // Local statistics
1336   double recent_cleanup_time = (os::elapsedTime() - start);
1337   _total_cleanup_time += recent_cleanup_time;
1338   _cleanup_times.add(recent_cleanup_time);
1339 
1340   {
1341     GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
1342     _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1343   }
1344 }
1345 
1346 // Supporting Object and Oop closures for reference discovery
1347 // and processing during marking


< prev index next >