    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and then
  // go into the second barrier
}

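// Overflow handling thus uses two rendezvous points: after the first barrier,
// worker 0 resets the global marking state (above); each worker then resets
// its own local data structures and meets the others here, at the second
// barrier, before marking resumes.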
void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

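// The gang task that drives the concurrent marking phase itself: each worker
// runs its associated marking task until marking completes, overflows the
// global mark stack, or is aborted.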
class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  // ...
};

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

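  // Each worker repeatedly claims the next unscanned root region from the
  // shared list until it is exhausted; claim_next() does the necessary
  // synchronization, so no locking is needed in this loop.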
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

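// Root regions are the survivor regions recorded during the initial-mark
// pause. Objects they reference must be marked before the next evacuation
// pause can start, which is why this scan runs as soon as marking begins.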
void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning has finished is not supported.");

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    G1CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

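// _concurrent_phase_status tracks whether a concurrent GC timer phase is in
// progress. A phase moves ConcPhaseNotStarted -> ConcPhaseStarted when it
// begins, and ConcPhaseStarted -> ConcPhaseStopping -> ConcPhaseNotStarted
// as it ends; the cmpxchg calls below make each transition atomic, so a
// thread starting a new phase cannot interleave with one still stopping the
// previous one.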
void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
  uint old_val = 0;
  do {
    old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
  } while (old_val != ConcPhaseNotStarted);
  _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
}

void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
  // ...
  uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
  if (old_val == ConcPhaseStarted) {
    _g1h->gc_timer_cm()->register_gc_concurrent_end();
    if (end_timer) {
      _g1h->gc_timer_cm()->register_gc_end();
    }
    old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
    assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
  } else {
    do {
      // Let the other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
      os::naked_short_sleep(1);
    } while (_concurrent_phase_status != ConcPhaseNotStarted);
  }
}

void G1ConcurrentMark::register_concurrent_phase_end() {
  register_concurrent_phase_end_common(false);
}

void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
  register_concurrent_phase_end_common(true);
}

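// Entry point for the concurrent phase of marking proper: sizes the marking
// worker gang and then runs the concurrent marking task on it.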
void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  // ...
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  // ...
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
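    // The concurrent mark thread reads this flag once the remark pause ends
    // and re-runs the concurrent marking phase from mark_from_roots().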

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

// ... (in G1ConcurrentMark::cleanup())

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for complete_cleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

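// Runs on the concurrent mark thread after the cleanup pause: frees the
// regions that cleanup() collected on _cleanup_list, batching them locally
// before handing them over to the secondary free list.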
void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,