13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/g1/concurrentMarkThread.inline.hpp"
30 #include "gc/g1/g1CollectedHeap.inline.hpp"
31 #include "gc/g1/g1CollectorState.hpp"
32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
33 #include "gc/g1/g1HeapVerifier.hpp"
34 #include "gc/g1/g1OopClosures.inline.hpp"
35 #include "gc/g1/g1Policy.hpp"
36 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
37 #include "gc/g1/g1StringDedup.hpp"
38 #include "gc/g1/heapRegion.inline.hpp"
39 #include "gc/g1/heapRegionRemSet.hpp"
40 #include "gc/g1/heapRegionSet.inline.hpp"
41 #include "gc/shared/adaptiveSizePolicy.hpp"
42 #include "gc/shared/gcId.hpp"
43 #include "gc/shared/gcTimer.hpp"
44 #include "gc/shared/gcTrace.hpp"
45 #include "gc/shared/gcTraceTime.inline.hpp"
46 #include "gc/shared/genOopClosures.inline.hpp"
47 #include "gc/shared/referencePolicy.hpp"
48 #include "gc/shared/strongRootsScope.hpp"
49 #include "gc/shared/suspendibleThreadSet.hpp"
50 #include "gc/shared/taskqueue.inline.hpp"
51 #include "gc/shared/vmGCOperations.hpp"
52 #include "gc/shared/weakProcessor.hpp"
53 #include "logging/log.hpp"
475 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
476
477 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
478 _num_active_tasks = _max_num_tasks;
479
480 for (uint i = 0; i < _max_num_tasks; ++i) {
481 G1CMTaskQueue* task_queue = new G1CMTaskQueue();
482 task_queue->initialize();
483 _task_queues->register_queue(i, task_queue);
484
485 _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
486
487 _accum_task_vtime[i] = 0.0;
488 }
489
490 reset_at_marking_complete();
491 _completed_initialization = true;
492 }
493
494 void G1ConcurrentMark::reset() {
495 reset_marking_for_restart();
496
497 // Reset all tasks, since different phases will use a different number of active
498 // threads. So, it's easiest to have all of them ready.
499 for (uint i = 0; i < _max_num_tasks; ++i) {
500 _tasks[i]->reset(_next_mark_bitmap);
501 }
502
503 uint max_regions = _g1h->max_regions();
504 for (uint i = 0; i < max_regions; i++) {
505 _top_at_rebuild_starts[i] = NULL;
506 _region_mark_stats[i].clear();
507 }
508
509 // We need this to make sure that the flag is on during the evacuation
510 // pause with the initial mark piggy-backed on it.
511 set_concurrent_marking_in_progress();
512 }
513
514 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
515 for (uint j = 0; j < _max_num_tasks; ++j) {
516 _tasks[j]->clear_mark_stats_cache(region_idx);
517 }
518 _top_at_rebuild_starts[region_idx] = NULL;
519 _region_mark_stats[region_idx].clear();
520 }
521
522 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
523 assert_at_safepoint_on_vm_thread();
524
525 // Need to clear mark bit of the humongous object if already set and during a marking cycle.
526 if (_next_mark_bitmap->is_marked(r->bottom())) {
527 _next_mark_bitmap->clear(r->bottom());
528 }
529
530 // Clear any statistics about the region gathered so far.
531 uint const region_idx = r->hrm_index();
532 if (r->is_humongous()) {
533 assert(r->is_starts_humongous(), "Should not get a humongous continues region here");
534 uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
535 for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
536 clear_statistics_in_region(j);
537 }
538 } else {
539 clear_statistics_in_region(region_idx);
540 }
541 }
542
543 void G1ConcurrentMark::reset_marking_for_restart() {
544 _global_mark_stack.set_empty();
545
546 // Expand the marking stack, if we have to and if we can.
547 if (has_overflown()) {
548 _global_mark_stack.expand();
549
550 uint max_regions = _g1h->max_regions();
551 for (uint i = 0; i < max_regions; i++) {
552 _region_mark_stats[i].clear_during_overflow();
553 }
554 }
555
556 clear_has_overflown();
557 _finger = _heap.start();
558
559 for (uint i = 0; i < _max_num_tasks; ++i) {
560 G1CMTaskQueue* queue = _task_queues->queue(i);
561 queue->set_empty();
562 }
728 HeapWord* end = r->end();
729 return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
730 }
731 };
732
733 bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
734 CheckBitmapClearHRClosure cl(_next_mark_bitmap);
735 _g1h->heap_region_iterate(&cl);
736 return cl.is_complete();
737 }
738
739 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
740 public:
741 bool do_heap_region(HeapRegion* r) {
742 r->note_start_of_marking();
743 return false;
744 }
745 };
746
747 void G1ConcurrentMark::pre_initial_mark() {
748 _has_aborted = false;
749
750 // Initialize marking structures. This has to be done in a STW phase.
751 reset();
752
753 // For each region note start of marking.
754 NoteStartOfMarkHRClosure startcl;
755 _g1h->heap_region_iterate(&startcl);
756 }
757
758
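// Runs at the end of the initial-mark pause: enables concurrent ("weak")
// reference discovery, snapshots the soft reference policy for this cycle, and
// activates the SATB queues of all threads so that concurrent marking sees a
// consistent snapshot-at-the-beginning of the object graph.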
759 void G1ConcurrentMark::post_initial_mark() {
760 // Start Concurrent Marking weak-reference discovery.
761 ReferenceProcessor* rp = _g1h->ref_processor_cm();
762 // enable ("weak") refs discovery
763 rp->enable_discovery();
764 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
765
766 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
767 // This is the start of the marking cycle; we expect all
768 // threads to have SATB queues with active set to false.
769 satb_mq_set.set_active_all_threads(true, /* new active value */
936 log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
937 task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
938 _concurrent_workers->run_task(&task, _num_concurrent_workers);
939
940 // It's possible that has_aborted() is true here without actually
941 // aborting the survivor scan earlier. This is OK as it's
942 // mainly used for sanity checking.
943 root_regions()->scan_finished();
944 }
945 }
946
947 void G1ConcurrentMark::concurrent_cycle_start() {
948 _gc_timer_cm->register_gc_start();
949
950 _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
951
952 _g1h->trace_heap_before_gc(_gc_tracer_cm);
953 }
954
955 void G1ConcurrentMark::concurrent_cycle_end() {
956 _g1h->trace_heap_after_gc(_gc_tracer_cm);
957
958 if (has_aborted()) {
959 log_info(gc, marking)("Concurrent Mark Abort");
960 _gc_tracer_cm->report_concurrent_mode_failure();
961 }
962
963 _gc_timer_cm->register_gc_end();
964
965 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
966 }
967
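// Concurrent marking proper: pick the number of marking workers, update the
// work gang (which may grant fewer threads than requested), set up the
// termination protocol via set_concurrency_and_phase() and run the marking task.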
968 void G1ConcurrentMark::mark_from_roots() {
969 _restart_for_overflow = false;
970
971 _num_concurrent_workers = calc_active_marking_workers();
972
973 uint active_workers = MAX2(1U, _num_concurrent_workers);
974
975 // Setting active workers is not guaranteed since fewer
976 // worker threads may currently exist and more may not be
977 // available.
978 active_workers = _concurrent_workers->update_active_workers(active_workers);
979 log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
980
981 // Parallel task terminator is set in "set_concurrency_and_phase()"
982 set_concurrency_and_phase(active_workers, true /* concurrent */);
983
984 G1CMConcurrentMarkingTask marking_task(this);
985 _concurrent_workers->run_task(&marking_task);
986 print_stats();
987 }
988
989 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
990 G1CollectedHeap* _g1h;
991 G1ConcurrentMark* _cm;
992
993 uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
994
995 void update_remset_before_rebuild(HeapRegion* hr) {
996 G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
997
998 size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
999 bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1000 if (selected_for_rebuild) {
1001 _num_regions_selected_for_rebuild++;
1002 }
1003 _cm->update_top_at_rebuild_start(hr);
1004 }
1005
1006 public:
1007 G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
1008 _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0) { }
1018 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1019 G1CollectedHeap* _g1h;
1020 public:
1021 G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1022
1023 virtual bool do_heap_region(HeapRegion* r) {
1024 _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1025 return false;
1026 }
1027 };
1028
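// The Remark pause: finalize marking with all active workers and process the
// references discovered during concurrent marking. On global mark stack
// overflow the marking state is reset and concurrent marking restarts;
// otherwise SATB queues are deactivated, per-task statistics are flushed and
// regions are selected for remembered set rebuild.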
1029 void G1ConcurrentMark::remark() {
1030 assert_at_safepoint_on_vm_thread();
1031
1032 // If a full collection has happened, we should not continue. However we might
1033 // have ended up here as the Remark VM operation has been scheduled already.
1034 if (has_aborted()) {
1035 return;
1036 }
1037
1038 if (VerifyDuringGC) {
1039 _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (Remark before)");
1040 }
1041 _g1h->verifier()->check_bitmaps("Remark Start");
1042
1043 G1Policy* g1p = _g1h->g1_policy();
1044 g1p->record_concurrent_mark_remark_start();
1045
1046 double start = os::elapsedTime();
1047
1048 finalize_marking();
1049
1050 double mark_work_end = os::elapsedTime();
1051
1052 weak_refs_work(false /* clear_all_soft_refs */);
1053
1054 if (has_overflown()) {
1055 // We overflowed. Restart concurrent marking.
1056 _restart_for_overflow = true;
1057
1058 // Verify the heap w.r.t. the previous marking bitmap.
1059 if (VerifyDuringGC) {
1060 _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (Remark overflow)");
1061 }
1062
1063 // Clear the marking state because we will be restarting
1064 // marking due to overflowing the global mark stack.
1065 reset_marking_for_restart();
1066 } else {
1067 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1068 // We're done with marking.
1069 // This is the end of the marking cycle; we expect all
1070 // threads to have SATB queues with active set to true.
1071 satb_mq_set.set_active_all_threads(false, /* new active value */
1072 true /* expected_active */);
1073
1074 {
1075 GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1076 flush_all_task_caches();
1077 }
1078
1079 {
1080 GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1081 G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1082 _g1h->heap_region_iterate(&cl);
1083 log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1084 _g1h->num_regions(), cl.num_selected_for_rebuild());
1085 }
1086
1087 if (VerifyDuringGC) {
1088 _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "During GC (Remark after)");
1089 }
1090 _g1h->verifier()->check_bitmaps("Remark End");
1091 assert(!restart_for_overflow(), "sanity");
1092 // Completely reset the marking state since marking completed
1093 reset_at_marking_complete();
1094 }
1095
1096 // Statistics
1097 double now = os::elapsedTime();
1098 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1099 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1100 _remark_times.add((now - start) * 1000.0);
1101
1102 g1p->record_concurrent_mark_remark_end();
1103
1104 G1CMIsAliveClosure is_alive(_g1h);
1105 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1106 }
1107
1108 class G1CleanupTask : public AbstractGangTask {
1109 // Per-region work during the Cleanup pause.
1110 class G1CleanupRegionsClosure : public HeapRegionClosure {
1111 G1CollectedHeap* _g1h;
1112 size_t _freed_bytes;
1113 FreeRegionList* _local_cleanup_list;
1114 uint _old_regions_removed;
1115 uint _humongous_regions_removed;
1116 HRRSCleanupTask* _hrrs_cleanup_task;
1117
1118 public:
1119 G1CleanupRegionsClosure(G1CollectedHeap* g1,
1120 FreeRegionList* local_cleanup_list,
1121 HRRSCleanupTask* hrrs_cleanup_task) :
1122 _g1h(g1),
1123 _freed_bytes(0),
1124 _local_cleanup_list(local_cleanup_list),
1125 _old_regions_removed(0),
1126 _humongous_regions_removed(0),
1127 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1128
1129 size_t freed_bytes() { return _freed_bytes; }
1130 uint old_regions_removed() const { return _old_regions_removed; }
1131 uint humongous_regions_removed() const { return _humongous_regions_removed; }
1132
1133 bool do_heap_region(HeapRegion *hr) {
1134 hr->note_end_of_marking();
1135
1136 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1137 _freed_bytes += hr->used();
1138 hr->set_containing_set(NULL);
1139 if (hr->is_humongous()) {
1140 _humongous_regions_removed++;
1141 _g1h->free_humongous_region(hr, _local_cleanup_list);
1142 } else {
1143 _old_regions_removed++;
1144 _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1145 }
1146 hr->clear_cardtable();
1147 } else {
1148 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1149 }
1150
1151 return false;
1152 }
1153 };
1154
1155 G1CollectedHeap* _g1h;
1156 FreeRegionList* _cleanup_list;
1157 HeapRegionClaimer _hrclaimer;
1158
1159 public:
1160 G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1161 AbstractGangTask("G1 Cleanup"),
1162 _g1h(g1h),
1163 _cleanup_list(cleanup_list),
1164 _hrclaimer(n_workers) {
1165
1166 HeapRegionRemSet::reset_for_cleanup_tasks();
1180 {
1181 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1182 _g1h->decrement_summary_bytes(cl.freed_bytes());
1183
1184 _cleanup_list->add_ordered(&local_cleanup_list);
1185 assert(local_cleanup_list.is_empty(), "post-condition");
1186
1187 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1188 }
1189 }
1190 };
1191
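// Runs the parallel G1CleanupTask to detach completely empty regions, prints
// them through the region printer if it is active, and finally prepends them
// to the free list.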
1192 void G1ConcurrentMark::reclaim_empty_regions() {
1193 WorkGang* workers = _g1h->workers();
1194 FreeRegionList empty_regions_list("Empty Regions After Mark List");
1195
1196 G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
1197 workers->run_task(&cl);
1198
1199 if (!empty_regions_list.is_empty()) {
1200 // Now print the empty regions list.
1201 G1HRPrinter* hrp = _g1h->hr_printer();
1202 if (hrp->is_active()) {
1203 FreeRegionListIterator iter(&empty_regions_list);
1204 while (iter.more_available()) {
1205 HeapRegion* hr = iter.get_next();
1206 hrp->cleanup(hr);
1207 }
1208 }
1209 // And actually make them available.
1210 _g1h->prepend_to_freelist(&empty_regions_list);
1211 }
1212 }
1213
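// The Cleanup pause: finish remembered set tracking after the rebuild, swap
// the marking bitmaps, reclaim completely empty regions, and update heap
// sizing, metaspace and monitoring information.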
1214 void G1ConcurrentMark::cleanup() {
1215 assert_at_safepoint_on_vm_thread();
1216
1217 // If a full collection has happened, we shouldn't do this.
1218 if (has_aborted()) {
1219 return;
1220 }
1221
1222 _g1h->verifier()->verify_region_sets_optional();
1223
1224 if (VerifyDuringGC) { // While rebuilding the remembered set we used the next marking...
1225 _g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "During GC (Cleanup before)");
1226 }
1227 _g1h->verifier()->check_bitmaps("Cleanup Start");
1228
1229 G1Policy* g1p = _g1h->g1_policy();
1230 g1p->record_concurrent_mark_cleanup_start();
1231
1232 double start = os::elapsedTime();
1233
1234 {
1235 GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
1236 G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1237 _g1h->heap_region_iterate(&cl);
1238 }
1239
1240 double count_end = os::elapsedTime();
1241 double this_final_counting_time = (count_end - start);
1242 _total_cleanup_time += this_final_counting_time;
1243
1244 if (log_is_enabled(Trace, gc, liveness)) {
1245 G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1246 _g1h->heap_region_iterate(&cl);
1247 }
1248
1249 // Install newly created mark bitmap as "prev".
1250 swap_mark_bitmaps();
1251 {
1252 GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
1253 reclaim_empty_regions();
1254 }
1255
1256 {
1257 GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
1258 _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1259 }
1260
1261 // Statistics.
1262 double end = os::elapsedTime();
1263 _cleanup_times.add((end - start) * 1000.0);
1264
1265 // Cleanup will have freed any regions completely full of garbage.
1266 // Update the soft reference policy with the new heap occupancy.
1267 Universe::update_heap_info_at_gc();
1268
1269 if (VerifyDuringGC) {
1270 _g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (Cleanup after)");
1271 }
1272
1273 _g1h->verifier()->check_bitmaps("Cleanup End");
1274
1275 _g1h->verifier()->verify_region_sets_optional();
1276
1277 // We need to make this a "collection" so any collection pause that
1278 // races with it goes around and waits for completeCleanup to finish.
1279 _g1h->increment_total_collections();
1280
1281 // Clean out dead classes and update Metaspace sizes.
1282 if (ClassUnloadingWithConcurrentMark) {
1283 ClassLoaderDataGraph::purge();
1284 }
1285 MetaspaceGC::compute_new_size();
1286
1287 // We reclaimed old regions so we should calculate the sizes to make
1288 // sure we update the old gen/space data.
1289 _g1h->g1mm()->update_sizes();
1290 }
1291
1292 // Supporting Object and Oop closures for reference discovery
1293 // and processing during marking
1294
1295 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1296 HeapWord* addr = (HeapWord*)obj;
1297 return addr != NULL &&
1298 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_ill(obj));
1299 }
1300
1301 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1302 // Uses the G1CMTask associated with a worker thread (for serial reference
1303 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1304 // trace referent objects.
1305 //
1306 // Using the G1CMTask and embedded local queues avoids having the worker
1307 // threads operating on the global mark stack. This reduces the risk
1308 // of overflowing the stack - which we would rather avoid at this late
1309 // stage. Also using the tasks' local queues removes the potential
1484 }
1485 };
1486
1487 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1488 assert(_workers != NULL, "Need parallel worker threads.");
1489 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1490
1491 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1492
1493 // Not strictly necessary but...
1494 //
1495 // We need to reset the concurrency level before each
1496 // proxy task execution, so that the termination protocol
1497 // and overflow handling in G1CMTask::do_marking_step() knows
1498 // how many workers to wait for.
1499 _cm->set_concurrency(_active_workers);
1500 _workers->run_task(&enq_task_proxy);
1501 }
1502
1503 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1504 if (has_overflown()) {
1505 // Skip processing the discovered references if we have
1506 // overflown the global marking stack. Reference objects
1507 // only get discovered once so it is OK not to
1508 // de-populate the discovered reference lists. We could have done so,
1509 // but the only benefit would be that, when marking restarts,
1510 // fewer reference objects are discovered.
1511 return;
1512 }
1513
1514 ResourceMark rm;
1515 HandleMark hm;
1516
1517 // Is alive closure.
1518 G1CMIsAliveClosure g1_is_alive(_g1h);
1519
1520 // Inner scope to exclude the cleaning of the string and symbol
1521 // tables from the displayed time.
1522 {
1523 GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1524
1525 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1526
1527 // See the comment in G1CollectedHeap::ref_processing_init()
1528 // about how reference processing currently works in G1.
1529
1530 // Set the soft reference policy
1531 rp->setup_policy(clear_all_soft_refs);
1532 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1533
1612 if (has_overflown()) {
1613 // We cannot trust g1_is_alive if the marking stack overflowed
1614 return;
1615 }
1616
1617 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1618
1619 // Unload Klasses, String, Symbols, Code Cache, etc.
1620 if (ClassUnloadingWithConcurrentMark) {
1621 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1622 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1623 _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1624 } else {
1625 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1626 // No need to clean string table and symbol table as they are treated as strong roots when
1627 // class unloading is disabled.
1628 _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1629 }
1630 }
1631
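// Swap the roles of the two marking bitmaps: after a completed marking cycle
// the (now complete) next bitmap becomes the new prev bitmap, and the old prev
// bitmap will be cleared and reused as the next bitmap for the following cycle.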
1632 void G1ConcurrentMark::swap_mark_bitmaps() {
1633 G1CMBitMap* temp = _prev_mark_bitmap;
1634 _prev_mark_bitmap = _next_mark_bitmap;
1635 _next_mark_bitmap = temp;
1636 }
1637
1638 // Closure for marking entries in SATB buffers.
1639 class G1CMSATBBufferClosure : public SATBBufferClosure {
1640 private:
1641 G1CMTask* _task;
1642 G1CollectedHeap* _g1h;
1643
1644 // This is very similar to G1CMTask::deal_with_reference, but with
1645 // more relaxed requirements for the argument, so this must be more
1646 // circumspect about treating the argument as an object.
1647 void do_entry(void* entry) const {
1648 _task->increment_refs_reached();
1649 oop const obj = static_cast<oop>(entry);
1650 _task->make_reference_grey(obj);
1651 }
1652
1653 public:
1654 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1655 : _task(task), _g1h(g1h) { }
1713
1714 do {
1715 task->do_marking_step(1000000000.0 /* something very large */,
1716 true /* do_termination */,
1717 false /* is_serial */);
1718 } while (task->has_aborted() && !_cm->has_overflown());
1719 // If we overflow, then we do not want to restart. We instead
1720 // want to abort remark and do concurrent marking again.
1721 task->record_end_time();
1722 }
1723
1724 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1725 AbstractGangTask("Par Remark"), _cm(cm) {
1726 _cm->terminator()->reset_for_reuse(active_workers);
1727 }
1728 };
1729
1730 void G1ConcurrentMark::finalize_marking() {
1731 ResourceMark rm;
1732 HandleMark hm;
1733
1734 GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1735
1736 _g1h->ensure_parsability(false);
1737
1738 // this is remark, so we'll use up all active threads
1739 uint active_workers = _g1h->workers()->active_workers();
1740 set_concurrency_and_phase(active_workers, false /* concurrent */);
1741 // Leave _parallel_marking_threads at its
1742 // value originally calculated in the G1ConcurrentMark
1743 // constructor and pass values of the active workers
1744 // through the gang in the task.
1745
1746 {
1747 StrongRootsScope srs(active_workers);
1748
1749 G1CMRemarkTask remarkTask(this, active_workers);
1750 // We will start all available threads, even if we decide that the
1751 // active_workers will be fewer. The extra ones will just bail out
1752 // immediately.
1753 _g1h->workers()->run_task(&remarkTask);
1754 }
// ======================================================================
// Second listing: a revised version of the same file (g1ConcurrentMark.cpp).
// ======================================================================
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/g1/concurrentMarkThread.inline.hpp"
30 #include "gc/g1/g1CollectedHeap.inline.hpp"
31 #include "gc/g1/g1CollectorState.hpp"
32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
33 #include "gc/g1/g1OopClosures.inline.hpp"
34 #include "gc/g1/g1Policy.hpp"
35 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
36 #include "gc/g1/g1StringDedup.hpp"
37 #include "gc/g1/heapRegion.inline.hpp"
38 #include "gc/g1/heapRegionRemSet.hpp"
39 #include "gc/g1/heapRegionSet.inline.hpp"
40 #include "gc/shared/adaptiveSizePolicy.hpp"
41 #include "gc/shared/gcId.hpp"
42 #include "gc/shared/gcTimer.hpp"
43 #include "gc/shared/gcTrace.hpp"
44 #include "gc/shared/gcTraceTime.inline.hpp"
45 #include "gc/shared/genOopClosures.inline.hpp"
46 #include "gc/shared/referencePolicy.hpp"
47 #include "gc/shared/strongRootsScope.hpp"
48 #include "gc/shared/suspendibleThreadSet.hpp"
49 #include "gc/shared/taskqueue.inline.hpp"
50 #include "gc/shared/vmGCOperations.hpp"
51 #include "gc/shared/weakProcessor.hpp"
52 #include "logging/log.hpp"
474 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
475
476 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
477 _num_active_tasks = _max_num_tasks;
478
479 for (uint i = 0; i < _max_num_tasks; ++i) {
480 G1CMTaskQueue* task_queue = new G1CMTaskQueue();
481 task_queue->initialize();
482 _task_queues->register_queue(i, task_queue);
483
484 _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
485
486 _accum_task_vtime[i] = 0.0;
487 }
488
489 reset_at_marking_complete();
490 _completed_initialization = true;
491 }
492
493 void G1ConcurrentMark::reset() {
494 _has_aborted = false;
495
496 reset_marking_for_restart();
497
498 // Reset all tasks, since different phases will use a different number of active
499 // threads. So, it's easiest to have all of them ready.
500 for (uint i = 0; i < _max_num_tasks; ++i) {
501 _tasks[i]->reset(_next_mark_bitmap);
502 }
503
504 uint max_regions = _g1h->max_regions();
505 for (uint i = 0; i < max_regions; i++) {
506 _top_at_rebuild_starts[i] = NULL;
507 _region_mark_stats[i].clear();
508 }
509
510 // We need this to make sure that the flag is on during the evacuation
511 // pause with the initial mark piggy-backed on it.
512 set_concurrent_marking_in_progress();
513 }
514
515 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
516 for (uint j = 0; j < _max_num_tasks; ++j) {
517 _tasks[j]->clear_mark_stats_cache(region_idx);
518 }
519 _top_at_rebuild_starts[region_idx] = NULL;
520 _region_mark_stats[region_idx].clear();
521 }
522
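// Clears all marking statistics for the given region. For a humongous object
// this covers every region the object spans, starting at its
// starts-humongous region.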
523 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
524 uint const region_idx = r->hrm_index();
525 if (r->is_humongous()) {
526 assert(r->is_starts_humongous(), "Should not get a humongous continues region here");
527 uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
528 for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
529 clear_statistics_in_region(j);
530 }
531 } else {
532 clear_statistics_in_region(region_idx);
533 }
534 }
535
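// Called when a humongous object is eagerly reclaimed. Which bitmap may hold
// a mark for it depends on the collector state: during marking or remembered
// set rebuild it is the next bitmap, otherwise the prev bitmap. Marking
// statistics only need clearing while a marking/rebuild is in progress.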
536 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
537 assert_at_safepoint_on_vm_thread();
538
539 G1CMBitMap* const bitmap = _g1h->collector_state()->mark_or_rebuild_in_progress() ? _next_mark_bitmap : _prev_mark_bitmap;
540 // Need to clear mark bit of the humongous object if already set and during a marking cycle.
541 if (bitmap->is_marked(r->bottom())) {
542 bitmap->clear(r->bottom());
543 }
544
545 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
546 return;
547 }
548
549 // Clear any statistics about the region gathered so far.
550 clear_statistics(r);
551 }
552
553 void G1ConcurrentMark::reset_marking_for_restart() {
554 _global_mark_stack.set_empty();
555
556 // Expand the marking stack, if we have to and if we can.
557 if (has_overflown()) {
558 _global_mark_stack.expand();
559
560 uint max_regions = _g1h->max_regions();
561 for (uint i = 0; i < max_regions; i++) {
562 _region_mark_stats[i].clear_during_overflow();
563 }
564 }
565
566 clear_has_overflown();
567 _finger = _heap.start();
568
569 for (uint i = 0; i < _max_num_tasks; ++i) {
570 G1CMTaskQueue* queue = _task_queues->queue(i);
571 queue->set_empty();
572 }
738 HeapWord* end = r->end();
739 return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
740 }
741 };
742
743 bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
744 CheckBitmapClearHRClosure cl(_next_mark_bitmap);
745 _g1h->heap_region_iterate(&cl);
746 return cl.is_complete();
747 }
748
749 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
750 public:
751 bool do_heap_region(HeapRegion* r) {
752 r->note_start_of_marking();
753 return false;
754 }
755 };
756
757 void G1ConcurrentMark::pre_initial_mark() {
758 // Initialize marking structures. This has to be done in a STW phase.
759 reset();
760
761 // For each region note start of marking.
762 NoteStartOfMarkHRClosure startcl;
763 _g1h->heap_region_iterate(&startcl);
764 }
765
766
767 void G1ConcurrentMark::post_initial_mark() {
768 // Start Concurrent Marking weak-reference discovery.
769 ReferenceProcessor* rp = _g1h->ref_processor_cm();
770 // enable ("weak") refs discovery
771 rp->enable_discovery();
772 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
773
774 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
775 // This is the start of the marking cycle; we expect all
776 // threads to have SATB queues with active set to false.
777 satb_mq_set.set_active_all_threads(true, /* new active value */
944 log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
945 task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
946 _concurrent_workers->run_task(&task, _num_concurrent_workers);
947
948 // It's possible that has_aborted() is true here without actually
949 // aborting the survivor scan earlier. This is OK as it's
950 // mainly used for sanity checking.
951 root_regions()->scan_finished();
952 }
953 }
954
955 void G1ConcurrentMark::concurrent_cycle_start() {
956 _gc_timer_cm->register_gc_start();
957
958 _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
959
960 _g1h->trace_heap_before_gc(_gc_tracer_cm);
961 }
962
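// End of the concurrent cycle. Resetting the clearing-next-bitmap state here
// presumably indicates that concurrent clearing of the next bitmap has
// finished (judging by the flag's name); timing and tracing are then
// finalized, including reporting a concurrent mode failure on abort.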
963 void G1ConcurrentMark::concurrent_cycle_end() {
964 _g1h->collector_state()->set_clearing_next_bitmap(false);
965
966 _g1h->trace_heap_after_gc(_gc_tracer_cm);
967
968 if (has_aborted()) {
969 log_info(gc, marking)("Concurrent Mark Abort");
970 _gc_tracer_cm->report_concurrent_mode_failure();
971 }
972
973 _gc_timer_cm->register_gc_end();
974
975 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
976 }
977
978 void G1ConcurrentMark::mark_from_roots() {
979 _restart_for_overflow = false;
980
981 _num_concurrent_workers = calc_active_marking_workers();
982
983 uint active_workers = MAX2(1U, _num_concurrent_workers);
984
985 // Setting active workers is not guaranteed since fewer
986 // worker threads may currently exist and more may not be
987 // available.
988 active_workers = _concurrent_workers->update_active_workers(active_workers);
989 log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
990
991 // Parallel task terminator is set in "set_concurrency_and_phase()"
992 set_concurrency_and_phase(active_workers, true /* concurrent */);
993
994 G1CMConcurrentMarkingTask marking_task(this);
995 _concurrent_workers->run_task(&marking_task);
996 print_stats();
997 }
998
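// Helper consolidating the verification done at the start and end of the
// Remark and Cleanup pauses: optional region set verification, heap
// verification under -XX:+VerifyDuringGC with a caller-supplied message, and
// bitmap consistency checks.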
999 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
1000 G1HeapVerifier* verifier = _g1h->verifier();
1001
1002 verifier->verify_region_sets_optional();
1003
1004 if (VerifyDuringGC) {
1005 GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
1006
1007 size_t const BufLen = 512;
1008 char buffer[BufLen];
1009
1010 os::snprintf(buffer, BufLen, "During GC (%s)", caller);
1011 verifier->verify(type, vo, buffer);
1012 }
1013
1014 verifier->check_bitmaps(caller);
1015 }
1016
1017 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1018 G1CollectedHeap* _g1h;
1019 G1ConcurrentMark* _cm;
1020
1021 uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
1022
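// Computes the region's live bytes from the marking statistics (liveness() is
// per-region live data in heap words, hence the HeapWordSize scaling) and lets
// the tracking policy decide whether this region's remembered set should be
// rebuilt.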
1023 void update_remset_before_rebuild(HeapRegion* hr) {
1024 G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1025
1026 size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1027 bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1028 if (selected_for_rebuild) {
1029 _num_regions_selected_for_rebuild++;
1030 }
1031 _cm->update_top_at_rebuild_start(hr);
1032 }
1033
1034 public:
1035 G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
1036 _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0) { }
1046 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1047 G1CollectedHeap* _g1h;
1048 public:
1049 G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1050
1051 virtual bool do_heap_region(HeapRegion* r) {
1052 _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1053 return false;
1054 }
1055 };
1056
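// The Remark pause. If marking finished (no global mark stack overflow), weak
// references are processed, SATB queues are deactivated, task caches are
// flushed and regions are selected for remembered set rebuild; otherwise the
// marking state is reset so that concurrent marking restarts.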
1057 void G1ConcurrentMark::remark() {
1058 assert_at_safepoint_on_vm_thread();
1059
1060 // If a full collection has happened, we should not continue. However we might
1061 // have ended up here as the Remark VM operation has been scheduled already.
1062 if (has_aborted()) {
1063 return;
1064 }
1065
1066 G1Policy* g1p = _g1h->g1_policy();
1067 g1p->record_concurrent_mark_remark_start();
1068
1069 double start = os::elapsedTime();
1070
1071 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1072
1073 {
1074 GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1075 finalize_marking();
1076 }
1077
1078 double mark_work_end = os::elapsedTime();
1079
1080 bool const mark_finished = !has_overflown();
1081 if (mark_finished) {
1082 weak_refs_work(false /* clear_all_soft_refs */);
1083
1084 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1085 // We're done with marking.
1086 // This is the end of the marking cycle; we expect all
1087 // threads to have SATB queues with active set to true.
1088 satb_mq_set.set_active_all_threads(false, /* new active value */
1089 true /* expected_active */);
1090
1091 {
1092 GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1093 flush_all_task_caches();
1094 }
1095
1096 {
1097 GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1098 G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1099 _g1h->heap_region_iterate(&cl);
1100 log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1101 _g1h->num_regions(), cl.num_selected_for_rebuild());
1102 }
1103
1104 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");
1105
1106 assert(!restart_for_overflow(), "sanity");
1107 // Completely reset the marking state since marking completed
1108 reset_at_marking_complete();
1109 } else {
1110 // We overflowed. Restart concurrent marking.
1111 _restart_for_overflow = true;
1112
1113 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1114
1115 // Clear the marking state because we will be restarting
1116 // marking due to overflowing the global mark stack.
1117 reset_marking_for_restart();
1118 }
1119
1120 {
1121 GCTraceTime(Debug, gc, phases)("Report Object Count");
1122 report_object_count();
1123 }
1124
1125 // Statistics
1126 double now = os::elapsedTime();
1127 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1128 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1129 _remark_times.add((now - start) * 1000.0);
1130
1131 g1p->record_concurrent_mark_remark_end();
1132 }
1133
1134 class G1CleanupTask : public AbstractGangTask {
1135 // Per-region work during the Cleanup pause.
1136 class G1CleanupRegionsClosure : public HeapRegionClosure {
1137 G1CollectedHeap* _g1h;
1138 size_t _freed_bytes;
1139 FreeRegionList* _local_cleanup_list;
1140 uint _old_regions_removed;
1141 uint _humongous_regions_removed;
1142 HRRSCleanupTask* _hrrs_cleanup_task;
1143
1144 public:
1145 G1CleanupRegionsClosure(G1CollectedHeap* g1,
1146 FreeRegionList* local_cleanup_list,
1147 HRRSCleanupTask* hrrs_cleanup_task) :
1148 _g1h(g1),
1149 _freed_bytes(0),
1150 _local_cleanup_list(local_cleanup_list),
1151 _old_regions_removed(0),
1152 _humongous_regions_removed(0),
1153 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1154
1155 size_t freed_bytes() { return _freed_bytes; }
1156 uint old_regions_removed() const { return _old_regions_removed; }
1157 uint humongous_regions_removed() const { return _humongous_regions_removed; }
1158
1159 bool do_heap_region(HeapRegion *hr) {
1160 hr->note_end_of_marking();
1161
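// A region can be freed here if it contains data but nothing in it was marked
// live, and it is neither young (young regions are reclaimed by evacuation
// pauses) nor archive (archive regions are pinned and never freed).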
1162 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1163 _freed_bytes += hr->used();
1164 hr->set_containing_set(NULL);
1165 if (hr->is_humongous()) {
1166 _humongous_regions_removed++;
1167 _g1h->free_humongous_region(hr, _local_cleanup_list);
1168 } else {
1169 _old_regions_removed++;
1170 _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1171 }
1172 hr->clear_cardtable();
1173 _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1174 log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1175 } else {
1176 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1177 }
1178
1179 return false;
1180 }
1181 };
1182
1183 G1CollectedHeap* _g1h;
1184 FreeRegionList* _cleanup_list;
1185 HeapRegionClaimer _hrclaimer;
1186
1187 public:
1188 G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1189 AbstractGangTask("G1 Cleanup"),
1190 _g1h(g1h),
1191 _cleanup_list(cleanup_list),
1192 _hrclaimer(n_workers) {
1193
1194 HeapRegionRemSet::reset_for_cleanup_tasks();
1208 {
1209 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1210 _g1h->decrement_summary_bytes(cl.freed_bytes());
1211
1212 _cleanup_list->add_ordered(&local_cleanup_list);
1213 assert(local_cleanup_list.is_empty(), "post-condition");
1214
1215 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1216 }
1217 }
1218 };
1219
1220 void G1ConcurrentMark::reclaim_empty_regions() {
1221 WorkGang* workers = _g1h->workers();
1222 FreeRegionList empty_regions_list("Empty Regions After Mark List");
1223
1224 G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
1225 workers->run_task(&cl);
1226
1227 if (!empty_regions_list.is_empty()) {
1228 log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1229 // Now print the empty regions list.
1230 G1HRPrinter* hrp = _g1h->hr_printer();
1231 if (hrp->is_active()) {
1232 FreeRegionListIterator iter(&empty_regions_list);
1233 while (iter.more_available()) {
1234 HeapRegion* hr = iter.get_next();
1235 hrp->cleanup(hr);
1236 }
1237 }
1238 // And actually make them available.
1239 _g1h->prepend_to_freelist(&empty_regions_list);
1240 }
1241 }
1242
1243 void G1ConcurrentMark::cleanup() {
1244 assert_at_safepoint_on_vm_thread();
1245
1246 // If a full collection has happened, we shouldn't do this.
1247 if (has_aborted()) {
1248 return;
1249 }
1250
1251 G1Policy* g1p = _g1h->g1_policy();
1252 g1p->record_concurrent_mark_cleanup_start();
1253
1254 double start = os::elapsedTime();
1255
1256 verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");
1257
1258 {
1259 GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
1260 G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1261 _g1h->heap_region_iterate(&cl);
1262 }
1263
1264 if (log_is_enabled(Trace, gc, liveness)) {
1265 G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1266 _g1h->heap_region_iterate(&cl);
1267 }
1268
1269 // Install newly created mark bitmap as "prev".
1270 swap_mark_bitmaps();
1271 {
1272 GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
1273 reclaim_empty_regions();
1274 }
1275
1276 // Cleanup will have freed any regions completely full of garbage.
1277 // Update the soft reference policy with the new heap occupancy.
1278 Universe::update_heap_info_at_gc();
1279
1280 // Clean out dead classes and update Metaspace sizes.
1281 if (ClassUnloadingWithConcurrentMark) {
1282 GCTraceTime(Debug, gc, phases)("Purge Metaspace");
1283 ClassLoaderDataGraph::purge();
1284 }
1285 MetaspaceGC::compute_new_size();
1286
1287 // We reclaimed old regions so we should calculate the sizes to make
1288 // sure we update the old gen/space data.
1289 _g1h->g1mm()->update_sizes();
1290
1291 verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1292
1293 // We need to make this a "collection" so any collection pause that
1294 // races with it goes around and waits for Cleanup to finish.
1295 _g1h->increment_total_collections();
1296
1297 // Local statistics
1298 double recent_cleanup_time = (os::elapsedTime() - start);
1299 _total_cleanup_time += recent_cleanup_time;
1300 _cleanup_times.add(recent_cleanup_time);
1301
1302 {
1303 GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
1304 _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1305 }
1306 }
1307
1308 // Supporting Object and Oop closures for reference discovery
1309 // and processing during marking
1310
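// An object is treated as live if its address lies outside the G1 reserved
// heap (such addresses are never subject to G1 reclamation), or if it is not
// dead ("ill") with respect to the current marking information.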
1311 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1312 HeapWord* addr = (HeapWord*)obj;
1313 return addr != NULL &&
1314 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_ill(obj));
1315 }
1316
1317 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1318 // Uses the G1CMTask associated with a worker thread (for serial reference
1319 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1320 // trace referent objects.
1321 //
1322 // Using the G1CMTask and embedded local queues avoids having the worker
1323 // threads operating on the global mark stack. This reduces the risk
1324 // of overflowing the stack - which we would rather avoid at this late
1325 // stage. Also using the tasks' local queues removes the potential
1500 }
1501 };
1502
1503 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1504 assert(_workers != NULL, "Need parallel worker threads.");
1505 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1506
1507 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1508
1509 // Not strictly necessary but...
1510 //
1511 // We need to reset the concurrency level before each
1512 // proxy task execution, so that the termination protocol
1513 // and overflow handling in G1CMTask::do_marking_step() knows
1514 // how many workers to wait for.
1515 _cm->set_concurrency(_active_workers);
1516 _workers->run_task(&enq_task_proxy);
1517 }
1518
1519 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1520 ResourceMark rm;
1521 HandleMark hm;
1522
1523 // Is alive closure.
1524 G1CMIsAliveClosure g1_is_alive(_g1h);
1525
1526 // Inner scope to exclude the cleaning of the string and symbol
1527 // tables from the displayed time.
1528 {
1529 GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1530
1531 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1532
1533 // See the comment in G1CollectedHeap::ref_processing_init()
1534 // about how reference processing currently works in G1.
1535
1536 // Set the soft reference policy
1537 rp->setup_policy(clear_all_soft_refs);
1538 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1539
1618 if (has_overflown()) {
1619 // We cannot trust g1_is_alive if the marking stack overflowed
1620 return;
1621 }
1622
1623 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1624
1625 // Unload Klasses, String, Symbols, Code Cache, etc.
1626 if (ClassUnloadingWithConcurrentMark) {
1627 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1628 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1629 _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1630 } else {
1631 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1632 // No need to clean string table and symbol table as they are treated as strong roots when
1633 // class unloading is disabled.
1634 _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1635 }
1636 }
1637
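// Reports object counts to the GC tracer using the concurrent-mark is-alive
// closure so that only objects still considered live are counted (presumably
// feeding the object-count-after-GC event).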
1638 void G1ConcurrentMark::report_object_count() {
1639 G1CMIsAliveClosure is_alive(_g1h);
1640 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1641 }
1642
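// Swap the roles of the marking bitmaps: the completed next bitmap becomes
// prev. Setting clearing_next_bitmap records that the new next bitmap still
// has to be cleared, presumably concurrently, before the next cycle starts.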
1643 void G1ConcurrentMark::swap_mark_bitmaps() {
1644 G1CMBitMap* temp = _prev_mark_bitmap;
1645 _prev_mark_bitmap = _next_mark_bitmap;
1646 _next_mark_bitmap = temp;
1647 _g1h->collector_state()->set_clearing_next_bitmap(true);
1648 }
1649
1650 // Closure for marking entries in SATB buffers.
1651 class G1CMSATBBufferClosure : public SATBBufferClosure {
1652 private:
1653 G1CMTask* _task;
1654 G1CollectedHeap* _g1h;
1655
1656 // This is very similar to G1CMTask::deal_with_reference, but with
1657 // more relaxed requirements for the argument, so this must be more
1658 // circumspect about treating the argument as an object.
1659 void do_entry(void* entry) const {
1660 _task->increment_refs_reached();
1661 oop const obj = static_cast<oop>(entry);
1662 _task->make_reference_grey(obj);
1663 }
1664
1665 public:
1666 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1667 : _task(task), _g1h(g1h) { }
1725
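// The huge time target effectively disables the time limit: during the STW
// remark pause each task runs do_marking_step() until it either completes or
// aborts because the global mark stack overflowed.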
1726 do {
1727 task->do_marking_step(1000000000.0 /* something very large */,
1728 true /* do_termination */,
1729 false /* is_serial */);
1730 } while (task->has_aborted() && !_cm->has_overflown());
1731 // If we overflow, then we do not want to restart. We instead
1732 // want to abort remark and do concurrent marking again.
1733 task->record_end_time();
1734 }
1735
1736 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1737 AbstractGangTask("Par Remark"), _cm(cm) {
1738 _cm->terminator()->reset_for_reuse(active_workers);
1739 }
1740 };
1741
1742 void G1ConcurrentMark::finalize_marking() {
1743 ResourceMark rm;
1744 HandleMark hm;
1745
1746 _g1h->ensure_parsability(false);
1747
1748 // this is remark, so we'll use up all active threads
1749 uint active_workers = _g1h->workers()->active_workers();
1750 set_concurrency_and_phase(active_workers, false /* concurrent */);
1751 // Leave _parallel_marking_threads at its
1752 // value originally calculated in the G1ConcurrentMark
1753 // constructor and pass values of the active workers
1754 // through the gang in the task.
1755
1756 {
1757 StrongRootsScope srs(active_workers);
1758
1759 G1CMRemarkTask remarkTask(this, active_workers);
1760 // We will start all available threads, even if we decide that the
1761 // active_workers will be fewer. The extra ones will just bail out
1762 // immediately.
1763 _g1h->workers()->run_task(&remarkTask);
1764 }