
src/share/vm/gc/g1/g1ConcurrentMark.cpp

rev 10464 : imported patch 8151126-clean-up-duplicate-code-for-clearing-bitmaps
rev 10465 : [mq]: 8151614-improve-concurrent-mark-logging


 835     // just abort the whole marking phase as quickly as possible.
 836     return;
 837   }
 838 
 839   // If we're executing the concurrent phase of marking, reset the marking
 840   // state; otherwise the marking state is reset after reference processing,
 841   // during the remark pause.
 842   // If we reset here as a result of an overflow during the remark we will
 843   // see assertion failures from any subsequent set_concurrency_and_phase()
 844   // calls.
 845   if (concurrent()) {
 846     // let the task associated with worker 0 do this
 847     if (worker_id == 0) {
 848       // task 0 is responsible for clearing the global data structures
 849       // We should be here because of an overflow. During STW we should
 850       // not clear the overflow flag since we rely on it being true when
 851       // we exit this method to abort the pause and restart concurrent
 852       // marking.
 853       reset_marking_state(true /* clear_overflow */);
 854 
 855       log_info(gc)("Concurrent Mark reset for overflow");
 856     }
 857   }
 858 
 859   // after this, each task should reset its own data structures
 860   // then go into the second barrier
 861 }
 862 
 863 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 864   SuspendibleThreadSetLeaver sts_leave(concurrent());
 865   _second_overflow_barrier_sync.enter();
 866 
 867   // at this point everything should be re-initialized and ready to go
 868 }
 869 
 870 class G1CMConcurrentMarkingTask: public AbstractGangTask {
 871 private:
 872   G1ConcurrentMark*     _cm;
 873   ConcurrentMarkThread* _cmt;
 874 
 875 public:
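
The overflow handling above and enter_second_sync_barrier together form a two-barrier rendezvous: all workers stop at a first barrier, worker 0 alone resets the global marking state, each worker then resets its own data structures, and everyone meets at the second barrier before marking resumes. A minimal standalone sketch of that pattern, using C++20 std::barrier and invented names (NUM_WORKERS, reset_global_state) rather than the HotSpot types:

#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

constexpr unsigned NUM_WORKERS = 4;     // hypothetical worker count
std::barrier first_sync(NUM_WORKERS);   // plays the role of the first overflow barrier
std::barrier second_sync(NUM_WORKERS);  // plays the role of _second_overflow_barrier_sync

void reset_global_state() {             // stands in for reset_marking_state()
  std::puts("worker 0: global marking state reset for overflow");
}

void worker(unsigned worker_id) {
  first_sync.arrive_and_wait();         // all workers stop here after an overflow
  if (worker_id == 0) {
    reset_global_state();               // exactly one worker clears the shared data
  }
  // ... each worker resets its own local structures here ...
  second_sync.arrive_and_wait();        // past this point everything is re-initialized
}

int main() {
  std::vector<std::thread> workers;
  for (unsigned i = 0; i < NUM_WORKERS; i++) {
    workers.emplace_back(worker, i);
  }
  for (std::thread& t : workers) {
    t.join();
  }
  return 0;
}

The second barrier is what guarantees no worker can observe half-reset state: nothing proceeds until worker 0's global reset and every local reset have completed.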


 970 private:
 971   G1ConcurrentMark* _cm;
 972 
 973 public:
 974   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 975     AbstractGangTask("Root Region Scan"), _cm(cm) { }
 976 
 977   void work(uint worker_id) {
 978     assert(Thread::current()->is_ConcurrentGC_thread(),
 979            "this should only be done by a conc GC thread");
 980 
 981     G1CMRootRegions* root_regions = _cm->root_regions();
 982     HeapRegion* hr = root_regions->claim_next();
 983     while (hr != NULL) {
 984       _cm->scanRootRegion(hr, worker_id);
 985       hr = root_regions->claim_next();
 986     }
 987   }
 988 };
 989 
 990 void G1ConcurrentMark::scanRootRegions() {
 991   // scan_in_progress() will have been set to true only if there was
 992   // at least one root region to scan. So, if it's false, we
 993   // should not attempt to do any further work.
 994   if (root_regions()->scan_in_progress()) {
 995     assert(!has_aborted(), "Aborting before root region scanning is finished is not supported.");
 996     GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
 997 
 998     _parallel_marking_threads = calc_parallel_marking_threads();
 999     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1000            "Maximum number of marking threads exceeded");
1001     uint active_workers = MAX2(1U, parallel_marking_threads());
1002 
1003     G1CMRootRegionScanTask task(this);
1004     _parallel_workers->set_active_workers(active_workers);
1005     _parallel_workers->run_task(&task);
1006 
1007     // It's possible that has_aborted() is true here without actually
1008     // aborting the survivor scan earlier. This is OK as it's
1009     // mainly used for sanity checking.
1010     root_regions()->scan_finished();
1011   }
1012 }
1013 
1014 void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
1015   uint old_val = 0;
1016   do {
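
G1CMRootRegionScanTask::work distributes root regions by having each gang worker repeatedly claim the next unscanned region until claim_next() returns NULL, so every region is scanned by exactly one worker and load balancing falls out for free. A simplified model of that claiming idiom, assuming (for the sketch only) that the regions sit in a fixed array behind an atomic cursor:

#include <atomic>
#include <cstddef>
#include <cstdio>

// Illustrative stand-in for G1CMRootRegions: an atomic cursor hands each
// region to exactly one claimant, with no lock on the fast path.
struct RootRegions {
  static constexpr size_t num_regions = 8;
  int regions[num_regions] = {0, 1, 2, 3, 4, 5, 6, 7};  // placeholder "regions"
  std::atomic<size_t> next_unclaimed{0};

  // Returns the next unclaimed region, or nullptr when none are left,
  // mirroring claim_next() in the work() loop above.
  int* claim_next() {
    size_t idx = next_unclaimed.fetch_add(1);
    return idx < num_regions ? &regions[idx] : nullptr;
  }
};

void scan_root_region(int* region, unsigned worker_id) {
  std::printf("worker %u scans region %d\n", worker_id, *region);
}

void work(RootRegions* root_regions, unsigned worker_id) {
  int* hr = root_regions->claim_next();
  while (hr != nullptr) {               // same shape as the work() method above
    scan_root_region(hr, worker_id);
    hr = root_regions->claim_next();
  }
}

int main() {
  RootRegions rr;
  work(&rr, 0);  // single-threaded driver; the real task runs one work() per gang worker
  return 0;
}

However the real region set is represented, the essential property is the single atomic claim point shared by all workers.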


1034       _g1h->gc_timer_cm()->register_gc_end();
1035     }
1036     old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
1037     assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
1038   } else {
1039     do {
1040       // Let other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
1041       os::naked_short_sleep(1);
1042     } while (_concurrent_phase_status != ConcPhaseNotStarted);
1043   }
1044 }
1045 
1046 void G1ConcurrentMark::register_concurrent_phase_end() {
1047   register_concurrent_phase_end_common(false);
1048 }
1049 
1050 void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
1051   register_concurrent_phase_end_common(true);
1052 }
1053 
1054 void G1ConcurrentMark::markFromRoots() {
1055   // we might be tempted to assert that:
1056   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1057   //        "inconsistent argument?");
1058   // However that wouldn't be right, because it's possible that
1059   // a safepoint is indeed in progress as a younger generation
1060   // stop-the-world GC happens even as we mark in this generation.
1061 
1062   _restart_for_overflow = false;
1063 
1064   // _g1h has _n_par_threads
1065   _parallel_marking_threads = calc_parallel_marking_threads();
1066   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1067     "Maximum number of marking threads exceeded");
1068 
1069   uint active_workers = MAX2(1U, parallel_marking_threads());
1070   assert(active_workers > 0, "Should have been set");
1071 
1072   // Parallel task terminator is set in "set_concurrency_and_phase()"
1073   set_concurrency_and_phase(active_workers, true /* concurrent */);
1074 
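
Earlier in this hunk, register_concurrent_phase_end_common retires the concurrent phase with one atomic step: a compare-and-swap of _concurrent_phase_status from ConcPhaseStopping to ConcPhaseNotStarted, whose returned old value is asserted; any other thread that arrives meanwhile just sleeps in 1 ms steps (os::naked_short_sleep(1)) until the transition becomes visible. A simplified model of that state machine using std::atomic; the enum and function names below are invented for the sketch:

#include <atomic>
#include <chrono>
#include <thread>

enum PhaseStatus { NotStarted, Started, Stopping };  // mirrors the ConcPhase* values

std::atomic<PhaseStatus> phase_status{Stopping};

// The thread that owns the stop transition publishes NotStarted via CAS. The
// assert in the original corresponds to expecting this exchange to succeed,
// since no other thread may change the status inside this window.
void finish_stopping() {
  PhaseStatus expected = Stopping;
  bool exchanged = phase_status.compare_exchange_strong(expected, NotStarted);
  (void)exchanged;  // should always be true here
}

// Every other thread waits for the owner, like the naked_short_sleep loop above.
void wait_until_not_started() {
  while (phase_status.load() != NotStarted) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
}

int main() {
  std::thread waiter(wait_until_not_started);
  finish_stopping();
  waiter.join();
  return 0;
}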


1097     HandleMark hm;  // handle scope
1098     g1h->prepare_for_verify();
1099     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1100   }
1101   g1h->verifier()->check_bitmaps("Remark Start");
1102 
1103   G1CollectorPolicy* g1p = g1h->g1_policy();
1104   g1p->record_concurrent_mark_remark_start();
1105 
1106   double start = os::elapsedTime();
1107 
1108   checkpointRootsFinalWork();
1109 
1110   double mark_work_end = os::elapsedTime();
1111 
1112   weakRefsWork(clear_all_soft_refs);
1113 
1114   if (has_overflown()) {
1115     // Oops.  We overflowed.  Restart concurrent marking.
1116     _restart_for_overflow = true;
1117     log_develop_trace(gc)("Remark led to restart for overflow.");
1118 
1119     // Verify the heap w.r.t. the previous marking bitmap.
1120     if (VerifyDuringGC) {
1121       HandleMark hm;  // handle scope
1122       g1h->prepare_for_verify();
1123       Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1124     }
1125 
1126     // Clear the marking state because we will be restarting
1127     // marking due to overflowing the global mark stack.
1128     reset_marking_state();
1129   } else {
1130     {
1131       GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());
1132 
1133       // Aggregate the per-task counting data that we have accumulated
1134       // while marking.
1135       aggregate_count_data();
1136     }
1137 
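
The has_overflown() branch above is one half of a handshake between the remark pause and the concurrent mark driver: remark records the overflow in _restart_for_overflow and resets the marking state, and the driver reacts by running the marking phase again from the start. A compact model of that retry loop (the flag handling is illustrative, not the actual ConcurrentMarkThread code):

#include <atomic>
#include <cstdio>

std::atomic<bool> overflown{true};  // set when the global mark stack overflows
bool restart_for_overflow = false;  // only touched inside the pause / by the driver

// Models the remark pause: on overflow, flag a restart and reset state;
// otherwise finish up (the "Aggregate Data" branch in the original).
void remark() {
  if (overflown.load()) {
    restart_for_overflow = true;
    overflown.store(false);         // cleared as part of resetting the mark state
    std::puts("remark: overflow, marking will restart");
  } else {
    std::puts("remark: marking complete, aggregating per-task data");
  }
}

int main() {
  // Models the driver thread: mark, remark, retry while remark saw an overflow.
  do {
    restart_for_overflow = false;
    // ... concurrent marking would run here ...
    remark();
  } while (restart_for_overflow);
  return 0;
}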


1743   g1h->verifier()->verify_region_sets_optional();
1744 
 1745   // We need to make this a "collection" so any collection pause that
1746   // races with it goes around and waits for completeCleanup to finish.
1747   g1h->increment_total_collections();
1748 
1749   // Clean out dead classes and update Metaspace sizes.
1750   if (ClassUnloadingWithConcurrentMark) {
1751     ClassLoaderDataGraph::purge();
1752   }
1753   MetaspaceGC::compute_new_size();
1754 
1755   // We reclaimed old regions so we should calculate the sizes to make
1756   // sure we update the old gen/space data.
1757   g1h->g1mm()->update_sizes();
1758   g1h->allocation_context_stats().update_after_mark();
1759 
1760   g1h->trace_heap_after_concurrent_cycle();
1761 }
1762 
1763 void G1ConcurrentMark::completeCleanup() {
1764   if (has_aborted()) return;
1765 
1766   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1767 
1768   _cleanup_list.verify_optional();
1769   FreeRegionList tmp_free_list("Tmp Free List");
1770 
1771   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1772                                   "cleanup list has %u entries",
1773                                   _cleanup_list.length());
1774 
1775   // No one else should be accessing the _cleanup_list at this point,
1776   // so it is not necessary to take any locks
1777   while (!_cleanup_list.is_empty()) {
1778     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1779     assert(hr != NULL, "Got NULL from a non-empty list");
1780     hr->par_clear();
1781     tmp_free_list.add_ordered(hr);
1782 
1783     // Instead of adding one region at a time to the secondary_free_list,
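
completeCleanup drains _cleanup_list into the local tmp_free_list, and the truncated comment on line 1783 introduces what is presumably the batching idea: rather than appending regions to the shared secondary_free_list one at a time, accumulate them locally and move them over in chunks, so the shared lock is taken once per chunk instead of once per region. A sketch of that pattern with std::list::splice; the chunk size and element type are made up for the sketch:

#include <cstddef>
#include <list>
#include <mutex>

std::mutex secondary_free_lock;      // guards the shared list
std::list<int> secondary_free_list;  // the shared destination

// Splices a whole local batch into the shared list under one lock acquisition.
void append_batch(std::list<int>& local) {
  std::lock_guard<std::mutex> guard(secondary_free_lock);
  secondary_free_list.splice(secondary_free_list.end(), local);
}

void complete_cleanup(std::list<int>& cleanup_list) {
  const size_t batch_size = 5;       // hypothetical chunk size
  std::list<int> tmp_free_list;      // plays the role of the local FreeRegionList
  while (!cleanup_list.empty()) {
    // Move one region from the cleanup list to the local list (no lock needed).
    tmp_free_list.splice(tmp_free_list.end(), cleanup_list, cleanup_list.begin());
    if (tmp_free_list.size() >= batch_size) {
      append_batch(tmp_free_list);   // one lock acquisition per batch_size regions
    }
  }
  if (!tmp_free_list.empty()) {
    append_batch(tmp_free_list);     // flush the remainder
  }
}

int main() {
  std::list<int> cleanup_list = {1, 2, 3, 4, 5, 6, 7};
  complete_cleanup(cleanup_list);
  return 0;
}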




 835     // just abort the whole marking phase as quickly as possible.
 836     return;
 837   }
 838 
 839   // If we're executing the concurrent phase of marking, reset the marking
 840   // state; otherwise the marking state is reset after reference processing,
 841   // during the remark pause.
 842   // If we reset here as a result of an overflow during the remark we will
 843   // see assertion failures from any subsequent set_concurrency_and_phase()
 844   // calls.
 845   if (concurrent()) {
 846     // let the task associated with worker 0 do this
 847     if (worker_id == 0) {
 848       // task 0 is responsible for clearing the global data structures
 849       // We should be here because of an overflow. During STW we should
 850       // not clear the overflow flag since we rely on it being true when
 851       // we exit this method to abort the pause and restart concurrent
 852       // marking.
 853       reset_marking_state(true /* clear_overflow */);
 854 
 855       log_info(gc, marking)("Concurrent Mark reset for overflow");
 856     }
 857   }
 858 
 859   // after this, each task should reset its own data structures
 860   // then go into the second barrier
 861 }
 862 
 863 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 864   SuspendibleThreadSetLeaver sts_leave(concurrent());
 865   _second_overflow_barrier_sync.enter();
 866 
 867   // at this point everything should be re-initialized and ready to go
 868 }
 869 
 870 class G1CMConcurrentMarkingTask: public AbstractGangTask {
 871 private:
 872   G1ConcurrentMark*     _cm;
 873   ConcurrentMarkThread* _cmt;
 874 
 875 public:


 970 private:
 971   G1ConcurrentMark* _cm;
 972 
 973 public:
 974   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 975     AbstractGangTask("Root Region Scan"), _cm(cm) { }
 976 
 977   void work(uint worker_id) {
 978     assert(Thread::current()->is_ConcurrentGC_thread(),
 979            "this should only be done by a conc GC thread");
 980 
 981     G1CMRootRegions* root_regions = _cm->root_regions();
 982     HeapRegion* hr = root_regions->claim_next();
 983     while (hr != NULL) {
 984       _cm->scanRootRegion(hr, worker_id);
 985       hr = root_regions->claim_next();
 986     }
 987   }
 988 };
 989 
 990 void G1ConcurrentMark::scan_root_regions() {
 991   // scan_in_progress() will have been set to true only if there was
 992   // at least one root region to scan. So, if it's false, we
 993   // should not attempt to do any further work.
 994   if (root_regions()->scan_in_progress()) {
 995     assert(!has_aborted(), "Aborting before root region scanning is finished is not supported.");

 996 
 997     _parallel_marking_threads = calc_parallel_marking_threads();
 998     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
 999            "Maximum number of marking threads exceeded");
1000     uint active_workers = MAX2(1U, parallel_marking_threads());
1001 
1002     G1CMRootRegionScanTask task(this);
1003     _parallel_workers->set_active_workers(active_workers);
1004     _parallel_workers->run_task(&task);
1005 
1006     // It's possible that has_aborted() is true here without actually
1007     // aborting the survivor scan earlier. This is OK as it's
1008     // mainly used for sanity checking.
1009     root_regions()->scan_finished();
1010   }
1011 }
1012 
1013 void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
1014   uint old_val = 0;
1015   do {


1033       _g1h->gc_timer_cm()->register_gc_end();
1034     }
1035     old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
1036     assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
1037   } else {
1038     do {
1039       // Let other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
1040       os::naked_short_sleep(1);
1041     } while (_concurrent_phase_status != ConcPhaseNotStarted);
1042   }
1043 }
1044 
1045 void G1ConcurrentMark::register_concurrent_phase_end() {
1046   register_concurrent_phase_end_common(false);
1047 }
1048 
1049 void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
1050   register_concurrent_phase_end_common(true);
1051 }
1052 
1053 void G1ConcurrentMark::mark_from_roots() {
1054   // we might be tempted to assert that:
1055   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1056   //        "inconsistent argument?");
1057   // However that wouldn't be right, because it's possible that
1058   // a safepoint is indeed in progress as a younger generation
1059   // stop-the-world GC happens even as we mark in this generation.
1060 
1061   _restart_for_overflow = false;
1062 
1063   // _g1h has _n_par_threads
1064   _parallel_marking_threads = calc_parallel_marking_threads();
1065   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1066     "Maximum number of marking threads exceeded");
1067 
1068   uint active_workers = MAX2(1U, parallel_marking_threads());
1069   assert(active_workers > 0, "Should have been set");
1070 
1071   // Parallel task terminator is set in "set_concurrency_and_phase()"
1072   set_concurrency_and_phase(active_workers, true /* concurrent */);
1073 


1096     HandleMark hm;  // handle scope
1097     g1h->prepare_for_verify();
1098     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1099   }
1100   g1h->verifier()->check_bitmaps("Remark Start");
1101 
1102   G1CollectorPolicy* g1p = g1h->g1_policy();
1103   g1p->record_concurrent_mark_remark_start();
1104 
1105   double start = os::elapsedTime();
1106 
1107   checkpointRootsFinalWork();
1108 
1109   double mark_work_end = os::elapsedTime();
1110 
1111   weakRefsWork(clear_all_soft_refs);
1112 
1113   if (has_overflown()) {
1114     // Oops.  We overflowed.  Restart concurrent marking.
1115     _restart_for_overflow = true;

1116 
1117     // Verify the heap w.r.t. the previous marking bitmap.
1118     if (VerifyDuringGC) {
1119       HandleMark hm;  // handle scope
1120       g1h->prepare_for_verify();
1121       Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1122     }
1123 
1124     // Clear the marking state because we will be restarting
1125     // marking due to overflowing the global mark stack.
1126     reset_marking_state();
1127   } else {
1128     {
1129       GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());
1130 
1131       // Aggregate the per-task counting data that we have accumulated
1132       // while marking.
1133       aggregate_count_data();
1134     }
1135 


1741   g1h->verifier()->verify_region_sets_optional();
1742 
 1743   // We need to make this a "collection" so any collection pause that
 1744   // races with it goes around and waits for complete_cleanup() to finish.
1745   g1h->increment_total_collections();
1746 
1747   // Clean out dead classes and update Metaspace sizes.
1748   if (ClassUnloadingWithConcurrentMark) {
1749     ClassLoaderDataGraph::purge();
1750   }
1751   MetaspaceGC::compute_new_size();
1752 
1753   // We reclaimed old regions so we should calculate the sizes to make
1754   // sure we update the old gen/space data.
1755   g1h->g1mm()->update_sizes();
1756   g1h->allocation_context_stats().update_after_mark();
1757 
1758   g1h->trace_heap_after_concurrent_cycle();
1759 }
1760 
1761 void G1ConcurrentMark::complete_cleanup() {
1762   if (has_aborted()) return;
1763 
1764   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1765 
1766   _cleanup_list.verify_optional();
1767   FreeRegionList tmp_free_list("Tmp Free List");
1768 
1769   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1770                                   "cleanup list has %u entries",
1771                                   _cleanup_list.length());
1772 
1773   // No one else should be accessing the _cleanup_list at this point,
1774   // so it is not necessary to take any locks
1775   while (!_cleanup_list.is_empty()) {
1776     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1777     assert(hr != NULL, "Got NULL from a non-empty list");
1778     hr->par_clear();
1779     tmp_free_list.add_ordered(hr);
1780 
1781     // Instead of adding one region at a time to the secondary_free_list,

