src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 2891 : 7120038: G1: ParallelGCThreads==0 is broken
Summary: Running G1 with ParallelGCThreads==0 results in various crashes and asserts. Most of these are caused by unguarded references to the worker threads array or an incorrect number of active workers.
Reviewed-by:
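
The pattern behind most of the individual fixes below can be summed up in one standalone sketch. The types here (GangTask, WorkGang, MarkingTask) are illustrative stand-ins for AbstractGangTask, FlexibleWorkGang and the marking tasks in this file, not HotSpot's classes: with ParallelGCThreads==0 no worker gang is ever constructed, so code must either guard the gang pointer or fall back to running the task's work(0) on the calling thread.

// Illustrative sketch only -- not HotSpot code.
#include <cstdio>

struct GangTask {
  virtual void work(int worker_i) = 0;
  virtual ~GangTask() { }
};

struct WorkGang {
  int _active_workers;
  explicit WorkGang(int n) : _active_workers(n) { }
  void run_task(GangTask* task) {
    // The real gang hands work(i) to GC worker threads; serial loop here.
    for (int i = 0; i < _active_workers; i++) {
      task->work(i);
    }
  }
};

struct MarkingTask : GangTask {
  virtual void work(int worker_i) { std::printf("marking on worker %d\n", worker_i); }
};

int main() {
  WorkGang* workers = NULL;     // ParallelGCThreads == 0: gang never built
  MarkingTask task;
  if (workers != NULL) {
    workers->run_task(&task);   // parallel path
  } else {
    task.work(0);               // serial fallback, as the patch does
  }
  return 0;
}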


1102     the_task->record_end_time();
1103     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1104 
1105     ConcurrentGCThread::stsLeave();
1106 
1107     double end_vtime = os::elapsedVTime();
1108     _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
1109   }
1110 
1111   CMConcurrentMarkingTask(ConcurrentMark* cm,
1112                           ConcurrentMarkThread* cmt) :
1113       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1114 
1115   ~CMConcurrentMarkingTask() { }
1116 };
1117 
1118 // Calculates the number of active workers for a concurrent
1119 // phase.
1120 int ConcurrentMark::calc_parallel_marking_threads() {
1121 
1122   size_t n_conc_workers;
1123   if (!G1CollectedHeap::use_parallel_gc_threads()) {
1124     n_conc_workers = 1;
1125   } else {
1126     if (!UseDynamicNumberOfGCThreads ||
1127         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1128          !ForceDynamicNumberOfGCThreads)) {
1129       n_conc_workers = max_parallel_marking_threads();
1130     } else {
1131       n_conc_workers =
1132         AdaptiveSizePolicy::calc_default_active_workers(
1133                                      max_parallel_marking_threads(),
1134                                      1, /* Minimum workers */
1135                                      parallel_marking_threads(),
1136                                      Threads::number_of_non_daemon_threads());
1137       // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1138       // that scaling has already gone into "_max_parallel_marking_threads".
1139     }
1140   }
1141   assert(n_conc_workers > 0, "Always need at least 1");
1142   return (int) MAX2(n_conc_workers, (size_t) 1);
1143 }
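
For reference, the shape of the sizing decision above: when dynamic GC thread counts are enabled, calc_default_active_workers() derives a worker count from application activity and clamps it. A minimal sketch under assumed inputs; the real AdaptiveSizePolicy::calc_default_active_workers also weighs the previous active count and the number of online CPUs.

#include <algorithm>
#include <cstddef>

size_t calc_default_active_workers_sketch(size_t total_workers,
                                          size_t min_workers,
                                          size_t application_threads) {
  size_t wanted = application_threads;  // crude demand estimate
  // Clamp into [min_workers, total_workers], as the caller above relies on.
  return std::max(min_workers, std::min(wanted, total_workers));
}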
1144 
1145 void ConcurrentMark::markFromRoots() {
1146   // we might be tempted to assert that:
1147   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1148   //        "inconsistent argument?");
1149   // However that wouldn't be right, because it's possible that
1150   // a safepoint is indeed in progress as a younger generation
1151   // stop-the-world GC happens even as we mark in this generation.
1152 
1153   _restart_for_overflow = false;
1154 
1155   // Parallel task terminator is set in "set_phase()".
1156   force_overflow_conc()->init();
1157 
1158   // _g1h has _n_par_threads
1159 
1160   _parallel_marking_threads = calc_parallel_marking_threads();
1161   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1162     "Maximum number of marking threads exceeded");
1163   _parallel_workers->set_active_workers((int)_parallel_marking_threads);
1164   // Don't set _n_par_threads because it affects MT in process_strong_roots()
1165   // and the decisions on that MT processing are made elsewhere.
1166 
1167   assert( _parallel_workers->active_workers() > 0, "Should have been set");
1168   set_phase(_parallel_workers->active_workers(), true /* concurrent */);
1169 
1170   CMConcurrentMarkingTask markingTask(this, cmThread());
1171   if (parallel_marking_threads() > 0) {
1172     _parallel_workers->run_task(&markingTask);
1173   } else {
1174     markingTask.work(0);
1175   }
1176   print_stats();
1177 }
1178 
1179 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1180   // world is stopped at this checkpoint
1181   assert(SafepointSynchronize::is_at_safepoint(),
1182          "world should be stopped");
1183 
1184   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1185 
1186   // If a full collection has happened, we shouldn't do this.
1187   if (has_aborted()) {
1188     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1189     return;
1190   }
1191 


1748 
1749   HRSPhaseSetter x(HRSPhaseCleanup);
1750   g1h->verify_region_sets_optional();
1751 
1752   if (VerifyDuringGC) {
1753     HandleMark hm;  // handle scope
1754     gclog_or_tty->print(" VerifyDuringGC:(before)");
1755     Universe::heap()->prepare_for_verify();
1756     Universe::verify(/* allow dirty */ true,
1757                      /* silent      */ false,
1758                      /* option      */ VerifyOption_G1UsePrevMarking);
1759   }
1760 
1761   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1762   g1p->record_concurrent_mark_cleanup_start();
1763 
1764   double start = os::elapsedTime();
1765 
1766   HeapRegionRemSet::reset_for_cleanup_tasks();
1767 
1768   g1h->set_par_threads();
1769   size_t n_workers = g1h->n_par_threads();
1770 
1771   // Do counting once more with the world stopped for good measure.
1772   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
1773                                         &_region_bm, &_card_bm);
1774   if (G1CollectedHeap::use_parallel_gc_threads()) {
1775     assert(g1h->check_heap_region_claim_values(
1776                                                HeapRegion::InitialClaimValue),
1777            "sanity check");
1778 
1779     assert(g1h->n_par_threads() == (int) n_workers,
1780       "Should not have been reset");
1781     g1h->workers()->run_task(&g1_par_count_task);
1782     // Done with the parallel phase so reset to 0.
1783     g1h->set_par_threads(0);
1784 
1785     assert(g1h->check_heap_region_claim_values(
1786                                              HeapRegion::FinalCountClaimValue),
1787            "sanity check");
1788   } else {
1789     g1_par_count_task.work(0);
1790   }
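
The set_par_threads()/run_task()/set_par_threads(0) bracketing above recurs around every stop-the-world parallel phase in this file: publish the worker count before the task so parallel code can size its work claims, then reset it so a later phase cannot observe a stale count. A sketch of the convention with stand-in types, not HotSpot's API:

struct HeapStandIn {
  int _n_par_threads;
  HeapStandIn() : _n_par_threads(0) { }
  void set_par_threads(int n) { _n_par_threads = n; }
  int  n_par_threads() const  { return _n_par_threads; }
};

template <typename Gang, typename Task>
void run_parallel_phase(HeapStandIn* heap, Gang* gang, Task* task, int n_workers) {
  heap->set_par_threads(n_workers);  // visible to workers during the task
  gang->run_task(task);
  heap->set_par_threads(0);          // done with the parallel phase
}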
1791 
1792   size_t known_garbage_bytes =
1793     g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
1794   g1p->set_known_garbage_bytes(known_garbage_bytes);
1795 
1796   size_t start_used_bytes = g1h->used();
1797   _at_least_one_mark_complete = true;
1798   g1h->set_marking_complete();
1799 
1800   ergo_verbose4(ErgoConcCycles,
1801            "finish cleanup",
1802            ergo_format_byte("occupancy")
1803            ergo_format_byte("capacity")
1804            ergo_format_byte_perc("known garbage"),
1805            start_used_bytes, g1h->capacity(),
1806            known_garbage_bytes,
1807            ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
1808 


1834     g1h->set_par_threads(0);
1835 
1836     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
1837            "sanity check");
1838   } else {
1839     g1_par_note_end_task.work(0);
1840   }
1841 
1842   if (!cleanup_list_is_empty()) {
1843     // The cleanup list is not empty, so we'll have to process it
1844     // concurrently. Notify anyone else that might be wanting free
1845     // regions that there will be more free regions coming soon.
1846     g1h->set_free_regions_coming();
1847   }
1848   double note_end_end = os::elapsedTime();
1849   if (G1PrintParCleanupStats) {
1850     gclog_or_tty->print_cr("  note end of marking: %8.3f ms.",
1851                            (note_end_end - note_end_start)*1000.0);
1852   }
1853 
1854 
1855   // call below, since it affects the metric by which we sort the heap
1856   // regions.
1857   if (G1ScrubRemSets) {
1858     double rs_scrub_start = os::elapsedTime();
1859     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
1860     if (G1CollectedHeap::use_parallel_gc_threads()) {
1861       g1h->set_par_threads((int)n_workers);
1862       g1h->workers()->run_task(&g1_par_scrub_rs_task);
1863       g1h->set_par_threads(0);
1864 
1865       assert(g1h->check_heap_region_claim_values(
1866                                             HeapRegion::ScrubRemSetClaimValue),
1867              "sanity check");
1868     } else {
1869       g1_par_scrub_rs_task.work(0);
1870     }
1871 
1872     double rs_scrub_end = os::elapsedTime();
1873     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1874     _total_rs_scrub_time += this_rs_scrub_time;


2312   ConcurrentMark *_cm;
2313 
2314 public:
2315   void work(int worker_i) {
2316     // Since all available tasks are actually started, we should
2317     // only proceed if we're supposed to be active.
2318     if ((size_t)worker_i < _cm->active_tasks()) {
2319       CMTask* task = _cm->task(worker_i);
2320       task->record_start_time();
2321       do {
2322         task->do_marking_step(1000000000.0 /* something very large */,
2323                               true /* do_stealing    */,
2324                               true /* do_termination */);
2325       } while (task->has_aborted() && !_cm->has_overflown());
2326       // If we overflow, then we do not want to restart. We instead
2327       // want to abort remark and do concurrent marking again.
2328       task->record_end_time();
2329     }
2330   }
2331 
2332   CMRemarkTask(ConcurrentMark* cm) :
2333     AbstractGangTask("Par Remark"), _cm(cm) {
2334     _cm->terminator()->reset_for_reuse(cm->_g1h->workers()->active_workers());
2335   }
2336 };
2337 
2338 void ConcurrentMark::checkpointRootsFinalWork() {
2339   ResourceMark rm;
2340   HandleMark   hm;
2341   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2342 
2343   g1h->ensure_parsability(false);
2344 
2345   if (G1CollectedHeap::use_parallel_gc_threads()) {
2346     G1CollectedHeap::StrongRootsScope srs(g1h);
2347     // this is remark, so we'll use up all active threads
2348     int active_workers = g1h->workers()->active_workers();
2349     if (active_workers == 0) {
2350       assert(active_workers > 0, "Should have been set earlier");
2351       active_workers = ParallelGCThreads;
2352       g1h->workers()->set_active_workers(active_workers);
2353     }
2354     set_phase(active_workers, false /* concurrent */);
2355     // Leave _parallel_marking_threads at its
2356     // value originally calculated in the ConcurrentMark
2357     // constructor and pass values of the active workers
2358     // through the gang in the task.
2359 
2360     CMRemarkTask remarkTask(this);
2361     g1h->set_par_threads(active_workers);
2362     g1h->workers()->run_task(&remarkTask);
2363     g1h->set_par_threads(0);
2364   } else {
2365     G1CollectedHeap::StrongRootsScope srs(g1h);
2366     // this is remark, so we'll use up all available threads
2367     int active_workers = 1;
2368     set_phase(active_workers, false /* concurrent */);
2369 
2370     CMRemarkTask remarkTask(this);
2371     // We will start all available threads, even if we decide that the
2372     // active_workers will be fewer. The extra ones will just bail out
2373     // immediately.
2374     remarkTask.work(0);
2375   }
2376   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2377   guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
2378 
2379   print_stats();
2380 
2381 #if VERIFY_OBJS_PROCESSED
2382   if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
2383     gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
2384                            _scan_obj_cl.objs_processed,
2385                            ThreadLocalObjQueue::objs_enqueued);
2386     guarantee(_scan_obj_cl.objs_processed ==
2387               ThreadLocalObjQueue::objs_enqueued,
2388               "Different number of objs processed and enqueued.");
2389   }
2390 #endif


3106                               ConcurrentMark* cm) :
3107     AbstractGangTask("Complete Mark in CSet"),
3108     _g1h(g1h), _cm(cm) { }
3109 
3110   void work(int worker_i) {
3111     CompleteMarkingInCSetHRClosure cmplt(_cm, worker_i);
3112     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_i);
3113     _g1h->collection_set_iterate_from(hr, &cmplt);
3114   }
3115 };
3116 
3117 void ConcurrentMark::complete_marking_in_collection_set() {
3118   G1CollectedHeap* g1h =  G1CollectedHeap::heap();
3119 
3120   if (!g1h->mark_in_progress()) {
3121     g1h->g1_policy()->record_mark_closure_time(0.0);
3122     return;
3123   }
3124 
3125   double start = os::elapsedTime();
3126   int n_workers = g1h->workers()->total_workers();
3127 
3128   G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
3129 
3130   assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
3131 
3132   if (G1CollectedHeap::use_parallel_gc_threads()) {
3133     g1h->set_par_threads(n_workers);
3134     g1h->workers()->run_task(&complete_mark_task);
3135     g1h->set_par_threads(0);
3136   } else {
3137     complete_mark_task.work(0);
3138   }
3139 
3140   assert(g1h->check_cset_heap_region_claim_values(HeapRegion::CompleteMarkCSetClaimValue), "sanity");
3141 
3142   // Now reset the claim values in the regions in the collection set.
3143   SetClaimValuesInCSetHRClosure set_cv_cl(HeapRegion::InitialClaimValue);
3144   g1h->collection_set_iterate(&set_cv_cl);
3145 
3146   assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
3147 
3148   double end_time = os::elapsedTime();
3149   double elapsed_time_ms = (end_time - start) * 1000.0;
3150   g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
3151 }
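
This function shows the clearest instance of the reported bug: n_workers is read via g1h->workers()->total_workers() before the use_parallel_gc_threads() check, so with ParallelGCThreads==0 the never-constructed gang is dereferenced. A standalone sketch of the hazard and of the guarded shape the new listing below adopts (types are stand-ins, not HotSpot's API):

#include <cstddef>

struct WorkGangStandIn { int total_workers() const { return 4; } };

struct G1HeapStandIn {
  WorkGangStandIn* _workers;   // stays NULL when ParallelGCThreads == 0
  G1HeapStandIn() : _workers(NULL) { }
  WorkGangStandIn* workers() const { return _workers; }
  bool use_parallel_gc_threads() const { return _workers != NULL; }
};

int safe_worker_count(const G1HeapStandIn& h) {
  // Old shape: int n = h.workers()->total_workers();  // NULL deref if serial
  if (h.use_parallel_gc_threads()) {
    return h.workers()->total_workers();  // gang is known to exist here
  }
  return 1;  // serial mode: the caller runs work(0) itself
}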
3152 




1102     the_task->record_end_time();
1103     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1104 
1105     ConcurrentGCThread::stsLeave();
1106 
1107     double end_vtime = os::elapsedVTime();
1108     _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
1109   }
1110 
1111   CMConcurrentMarkingTask(ConcurrentMark* cm,
1112                           ConcurrentMarkThread* cmt) :
1113       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1114 
1115   ~CMConcurrentMarkingTask() { }
1116 };
1117 
1118 // Calculates the number of active workers for a concurrent
1119 // phase.
1120 int ConcurrentMark::calc_parallel_marking_threads() {
1121 
1122   size_t n_conc_workers = 0;
1123   if (G1CollectedHeap::use_parallel_gc_threads()) {
1124     if (!UseDynamicNumberOfGCThreads ||
1125         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1126          !ForceDynamicNumberOfGCThreads)) {
1127       n_conc_workers = max_parallel_marking_threads();
1128     } else {
1129       n_conc_workers =
1130         AdaptiveSizePolicy::calc_default_active_workers(
1131                                      max_parallel_marking_threads(),
1132                                      1, /* Minimum workers */
1133                                      parallel_marking_threads(),
1134                                      Threads::number_of_non_daemon_threads());
1135       // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1136       // that scaling has already gone into "_max_parallel_marking_threads".
1137     }
1138     assert(n_conc_workers > 0, "Always need at least 1");
1139   }
1140   return (int) MAX2(n_conc_workers, max_parallel_marking_threads());
1141 }
1142 
1143 void ConcurrentMark::markFromRoots() {
1144   // we might be tempted to assert that:
1145   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1146   //        "inconsistent argument?");
1147   // However that wouldn't be right, because it's possible that
1148   // a safepoint is indeed in progress as a younger generation
1149   // stop-the-world GC happens even as we mark in this generation.
1150 
1151   _restart_for_overflow = false;
1152   force_overflow_conc()->init();
1153 
1154   // _g1h has _n_par_threads
1155   _parallel_marking_threads = calc_parallel_marking_threads();
1156   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1157     "Maximum number of marking threads exceeded");
1158 
1159   size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
1160 
1161   // Parallel task terminator is set in "set_phase()"
1162   set_phase(active_workers, true /* concurrent */);
1163 
1164   CMConcurrentMarkingTask markingTask(this, cmThread());
1165   if (parallel_marking_threads() > 0) {
1166     _parallel_workers->set_active_workers((int)active_workers);
1167     // Don't set _n_par_threads because it affects MT in process_strong_roots()
1168     // and the decisions on that MT processing are made elsewhere.
1169     assert(_parallel_workers->active_workers() > 0, "Should have been set");
1170     _parallel_workers->run_task(&markingTask);
1171   } else {
1172     markingTask.work(0);
1173   }
1174   print_stats();
1175 }
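
The restructuring above is worth spelling out: set_active_workers() and the related assert now sit inside the parallel_marking_threads() > 0 branch, so the serial path never touches _parallel_workers, which is NULL when ParallelGCThreads==0. A sketch of the control flow with template stand-ins; the gang is only dereferenced on the parallel path:

template <typename Gang, typename Task>
void mark_from_roots_shape(Gang* gang, Task* task, int marking_threads) {
  if (marking_threads > 0) {
    gang->set_active_workers(marking_threads);  // safe: gang exists here
    gang->run_task(task);
  } else {
    task->work(0);  // gang may be NULL; it is never touched on this path
  }
}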
1176 
1177 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1178   // world is stopped at this checkpoint
1179   assert(SafepointSynchronize::is_at_safepoint(),
1180          "world should be stopped");
1181 
1182   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1183 
1184   // If a full collection has happened, we shouldn't do this.
1185   if (has_aborted()) {
1186     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1187     return;
1188   }
1189 


1746 
1747   HRSPhaseSetter x(HRSPhaseCleanup);
1748   g1h->verify_region_sets_optional();
1749 
1750   if (VerifyDuringGC) {
1751     HandleMark hm;  // handle scope
1752     gclog_or_tty->print(" VerifyDuringGC:(before)");
1753     Universe::heap()->prepare_for_verify();
1754     Universe::verify(/* allow dirty */ true,
1755                      /* silent      */ false,
1756                      /* option      */ VerifyOption_G1UsePrevMarking);
1757   }
1758 
1759   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1760   g1p->record_concurrent_mark_cleanup_start();
1761 
1762   double start = os::elapsedTime();
1763 
1764   HeapRegionRemSet::reset_for_cleanup_tasks();
1765 
1766   size_t n_workers;
1767 
1768 
1769   // Do counting once more with the world stopped for good measure.
1770   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
1771                                         &_region_bm, &_card_bm);
1772   if (G1CollectedHeap::use_parallel_gc_threads()) {
1773     assert(g1h->check_heap_region_claim_values(
1774                                                HeapRegion::InitialClaimValue),
1775            "sanity check");
1776     
1777     g1h->set_par_threads();
1778     n_workers = g1h->n_par_threads();
1779     assert(g1h->n_par_threads() == (int) n_workers,
1780            "Should not have been reset");
1781     g1h->workers()->run_task(&g1_par_count_task);
1782     // Done with the parallel phase so reset to 0.
1783     g1h->set_par_threads(0);
1784 
1785     assert(g1h->check_heap_region_claim_values(
1786                                              HeapRegion::FinalCountClaimValue),
1787            "sanity check");
1788   } else {
1789     n_workers = 1;
1790     g1_par_count_task.work(0);
1791   }
1792 
1793   size_t known_garbage_bytes =
1794     g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
1795   g1p->set_known_garbage_bytes(known_garbage_bytes);
1796 
1797   size_t start_used_bytes = g1h->used();
1798   _at_least_one_mark_complete = true;
1799   g1h->set_marking_complete();
1800 
1801   ergo_verbose4(ErgoConcCycles,
1802            "finish cleanup",
1803            ergo_format_byte("occupancy")
1804            ergo_format_byte("capacity")
1805            ergo_format_byte_perc("known garbage"),
1806            start_used_bytes, g1h->capacity(),
1807            known_garbage_bytes,
1808            ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
1809 


1835     g1h->set_par_threads(0);
1836 
1837     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
1838            "sanity check");
1839   } else {
1840     g1_par_note_end_task.work(0);
1841   }
1842 
1843   if (!cleanup_list_is_empty()) {
1844     // The cleanup list is not empty, so we'll have to process it
1845     // concurrently. Notify anyone else that might be wanting free
1846     // regions that there will be more free regions coming soon.
1847     g1h->set_free_regions_coming();
1848   }
1849   double note_end_end = os::elapsedTime();
1850   if (G1PrintParCleanupStats) {
1851     gclog_or_tty->print_cr("  note end of marking: %8.3f ms.",
1852                            (note_end_end - note_end_start)*1000.0);
1853   }
1854 
1855   // call below, since it affects the metric by which we sort the heap
1856   // regions.
1857   if (G1ScrubRemSets) {
1858     double rs_scrub_start = os::elapsedTime();
1859     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
1860     if (G1CollectedHeap::use_parallel_gc_threads()) {
1861       g1h->set_par_threads((int)n_workers);
1862       g1h->workers()->run_task(&g1_par_scrub_rs_task);
1863       g1h->set_par_threads(0);
1864 
1865       assert(g1h->check_heap_region_claim_values(
1866                                             HeapRegion::ScrubRemSetClaimValue),
1867              "sanity check");
1868     } else {
1869       g1_par_scrub_rs_task.work(0);
1870     }
1871 
1872     double rs_scrub_end = os::elapsedTime();
1873     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1874     _total_rs_scrub_time += this_rs_scrub_time;


2312   ConcurrentMark *_cm;
2313 
2314 public:
2315   void work(int worker_i) {
2316     // Since all available tasks are actually started, we should
2317     // only proceed if we're supposed to be active.
2318     if ((size_t)worker_i < _cm->active_tasks()) {
2319       CMTask* task = _cm->task(worker_i);
2320       task->record_start_time();
2321       do {
2322         task->do_marking_step(1000000000.0 /* something very large */,
2323                               true /* do_stealing    */,
2324                               true /* do_termination */);
2325       } while (task->has_aborted() && !_cm->has_overflown());
2326       // If we overflow, then we do not want to restart. We instead
2327       // want to abort remark and do concurrent marking again.
2328       task->record_end_time();
2329     }
2330   }
2331 
2332   CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2333     AbstractGangTask("Par Remark"), _cm(cm) {
2334     _cm->terminator()->reset_for_reuse(active_workers);
2335   }
2336 };
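
The constructor change above (taking active_workers instead of reading the gang) matters because the terminator must be sized to the number of threads that will actually offer termination; in the serial case there is no gang to ask. A minimal sketch of that contract, simplified: the real ParallelTaskTerminator also spins and re-checks work queues before actually terminating.

#include <atomic>

struct TerminatorSketch {
  std::atomic<int> _arrived;
  int _n_threads;
  TerminatorSketch() : _arrived(0), _n_threads(0) { }
  void reset_for_reuse(int n_threads) {  // mirrors the call in the patch
    _arrived = 0;
    _n_threads = n_threads;
  }
  bool offer_termination() {
    // Only the last of _n_threads workers to arrive sees true; sizing this
    // to the wrong count either hangs remark or terminates it early.
    return ++_arrived == _n_threads;
  }
};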
2337 
2338 void ConcurrentMark::checkpointRootsFinalWork() {
2339   ResourceMark rm;
2340   HandleMark   hm;
2341   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2342 
2343   g1h->ensure_parsability(false);
2344 
2345   if (G1CollectedHeap::use_parallel_gc_threads()) {
2346     G1CollectedHeap::StrongRootsScope srs(g1h);
2347     // this is remark, so we'll use up all active threads
2348     int active_workers = g1h->workers()->active_workers();
2349     if (active_workers == 0) {
2350       assert(active_workers > 0, "Should have been set earlier");
2351       active_workers = ParallelGCThreads;
2352       g1h->workers()->set_active_workers(active_workers);
2353     }
2354     set_phase(active_workers, false /* concurrent */);
2355     // Leave _parallel_marking_threads at its
2356     // value originally calculated in the ConcurrentMark
2357     // constructor and pass values of the active workers
2358     // through the gang in the task.
2359 
2360     CMRemarkTask remarkTask(this, active_workers);
2361     g1h->set_par_threads(active_workers);
2362     g1h->workers()->run_task(&remarkTask);
2363     g1h->set_par_threads(0);
2364   } else {
2365     G1CollectedHeap::StrongRootsScope srs(g1h);
2366     // this is remark, so we'll use up all available threads
2367     int active_workers = 1;
2368     set_phase(active_workers, false /* concurrent */);
2369 
2370     CMRemarkTask remarkTask(this, active_workers);
2371     // We will start all available threads, even if we decide that the
2372     // active_workers will be fewer. The extra ones will just bail out
2373     // immediately.
2374     remarkTask.work(0);
2375   }
2376   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2377   guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
2378 
2379   print_stats();
2380 
2381 #if VERIFY_OBJS_PROCESSED
2382   if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
2383     gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
2384                            _scan_obj_cl.objs_processed,
2385                            ThreadLocalObjQueue::objs_enqueued);
2386     guarantee(_scan_obj_cl.objs_processed ==
2387               ThreadLocalObjQueue::objs_enqueued,
2388               "Different number of objs processed and enqueued.");
2389   }
2390 #endif
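
The active_workers == 0 recovery at lines 2349-2353 uses HotSpot's usual assert-then-repair idiom: debug builds stop on the assert, while product builds (with asserts compiled out) fall back to ParallelGCThreads rather than dispatching zero workers. A hypothetical helper showing the shape, not HotSpot code:

#include <cassert>

int sanitized_active_workers(int active_workers, int parallel_gc_threads) {
  if (active_workers == 0) {
    // Debug builds stop here; product builds fall through and recover.
    assert(active_workers > 0 && "Should have been set earlier");
    active_workers = parallel_gc_threads;
  }
  return active_workers;
}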


3106                               ConcurrentMark* cm) :
3107     AbstractGangTask("Complete Mark in CSet"),
3108     _g1h(g1h), _cm(cm) { }
3109 
3110   void work(int worker_i) {
3111     CompleteMarkingInCSetHRClosure cmplt(_cm, worker_i);
3112     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_i);
3113     _g1h->collection_set_iterate_from(hr, &cmplt);
3114   }
3115 };
3116 
3117 void ConcurrentMark::complete_marking_in_collection_set() {
3118   G1CollectedHeap* g1h =  G1CollectedHeap::heap();
3119 
3120   if (!g1h->mark_in_progress()) {
3121     g1h->g1_policy()->record_mark_closure_time(0.0);
3122     return;
3123   }
3124 
3125   double start = os::elapsedTime();
3126   G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
3127 
3128   assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
3129 
3130   if (G1CollectedHeap::use_parallel_gc_threads()) {
3131     int n_workers = g1h->workers()->active_workers();
3132     g1h->set_par_threads(n_workers);
3133     g1h->workers()->run_task(&complete_mark_task);
3134     g1h->set_par_threads(0);
3135   } else {
3136     complete_mark_task.work(0);
3137   }
3138 
3139   assert(g1h->check_cset_heap_region_claim_values(HeapRegion::CompleteMarkCSetClaimValue), "sanity");
3140 
3141   // Now reset the claim values in the regions in the collection set.
3142   SetClaimValuesInCSetHRClosure set_cv_cl(HeapRegion::InitialClaimValue);
3143   g1h->collection_set_iterate(&set_cv_cl);
3144 
3145   assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
3146 
3147   double end_time = os::elapsedTime();
3148   double elapsed_time_ms = (end_time - start) * 1000.0;
3149   g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
3150 }
3151