< prev index next >

src/share/vm/gc/g1/concurrentMark.cpp

Print this page




  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ErgoVerbose.hpp"
  35 #include "gc/g1/g1Log.hpp"
  36 #include "gc/g1/g1OopClosures.inline.hpp"
  37 #include "gc/g1/g1RemSet.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionManager.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/g1/suspendibleThreadSet.hpp"

  44 #include "gc/shared/gcTimer.hpp"
  45 #include "gc/shared/gcTrace.hpp"
  46 #include "gc/shared/gcTraceTime.hpp"
  47 #include "gc/shared/genOopClosures.inline.hpp"
  48 #include "gc/shared/referencePolicy.hpp"
  49 #include "gc/shared/strongRootsScope.hpp"
  50 #include "gc/shared/taskqueue.inline.hpp"
  51 #include "gc/shared/vmGCOperations.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.inline.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 
  61 // Concurrent marking bit map wrapper
  62 
  63 CMBitMapRO::CMBitMapRO(int shifter) :


 503   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 504   _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
 505             CardTableModRefBS::card_shift,
 506             false /* in_resource_area*/),
 507 
 508   _prevMarkBitMap(&_markBitMap1),
 509   _nextMarkBitMap(&_markBitMap2),
 510 
 511   _markStack(this),
 512   // _finger set in set_non_marking_state
 513 
 514   _max_worker_id(ParallelGCThreads),
 515   // _active_tasks set in set_non_marking_state
 516   // _tasks set inside the constructor
 517   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 518   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 519 
 520   _has_overflown(false),
 521   _concurrent(false),
 522   _has_aborted(false),
 523   _aborted_gc_id(GCId::undefined()),
 524   _restart_for_overflow(false),
 525   _concurrent_marking_in_progress(false),
 526 
 527   // _verbose_level set below
 528 
 529   _init_times(),
 530   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 531   _cleanup_times(),
 532   _total_counting_time(0.0),
 533   _total_rs_scrub_time(0.0),
 534 
 535   _parallel_workers(NULL),
 536 
 537   _count_card_bitmaps(NULL),
 538   _count_marked_bytes(NULL),
 539   _completed_initialization(false) {
 540   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 541   if (verbose_level < no_verbose) {
 542     verbose_level = no_verbose;
 543   }


 974   }
 975 
 976   // If we're executing the concurrent phase of marking, reset the marking
 977   // state; otherwise the marking state is reset after reference processing,
 978   // during the remark pause.
 979   // If we reset here as a result of an overflow during the remark we will
 980   // see assertion failures from any subsequent set_concurrency_and_phase()
 981   // calls.
 982   if (concurrent()) {
 983     // let the task associated with with worker 0 do this
 984     if (worker_id == 0) {
 985       // task 0 is responsible for clearing the global data structures
 986       // We should be here because of an overflow. During STW we should
 987       // not clear the overflow flag since we rely on it being true when
 988       // we exit this method to abort the pause and restart concurrent
 989       // marking.
 990       reset_marking_state(true /* clear_overflow */);
 991       force_overflow()->update();
 992 
 993       if (G1Log::fine()) {
 994         gclog_or_tty->gclog_stamp(concurrent_gc_id());
 995         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
 996       }
 997     }
 998   }
 999 
1000   // after this, each task should reset its own data structures then
1001   // then go into the second barrier
1002 }
1003 
1004 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1005   bool barrier_aborted;
1006 
1007   if (verbose_low()) {
1008     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1009   }
1010 
1011   {
1012     SuspendibleThreadSetLeaver sts_leave(concurrent());
1013     barrier_aborted = !_second_overflow_barrier_sync.enter();
1014   }


1164     CMRootRegions* root_regions = _cm->root_regions();
1165     HeapRegion* hr = root_regions->claim_next();
1166     while (hr != NULL) {
1167       _cm->scanRootRegion(hr, worker_id);
1168       hr = root_regions->claim_next();
1169     }
1170   }
1171 };
1172 
1173 void ConcurrentMark::scanRootRegions() {
1174   double scan_start = os::elapsedTime();
1175 
1176   // Start of concurrent marking.
1177   ClassLoaderDataGraph::clear_claimed_marks();
1178 
1179   // scan_in_progress() will have been set to true only if there was
1180   // at least one root region to scan. So, if it's false, we
1181   // should not attempt to do any further work.
1182   if (root_regions()->scan_in_progress()) {
1183     if (G1Log::fine()) {
1184       gclog_or_tty->gclog_stamp(concurrent_gc_id());
1185       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
1186     }
1187 
1188     _parallel_marking_threads = calc_parallel_marking_threads();
1189     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1190            "Maximum number of marking threads exceeded");
1191     uint active_workers = MAX2(1U, parallel_marking_threads());
1192 
1193     CMRootRegionScanTask task(this);
1194     _parallel_workers->set_active_workers(active_workers);
1195     _parallel_workers->run_task(&task);
1196 
1197     if (G1Log::fine()) {
1198       gclog_or_tty->gclog_stamp(concurrent_gc_id());
1199       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
1200     }
1201 
1202     // It's possible that has_aborted() is true here without actually
1203     // aborting the survivor scan earlier. This is OK as it's
1204     // mainly used for sanity checking.
1205     root_regions()->scan_finished();
1206   }
1207 }
1208 
1209 void ConcurrentMark::markFromRoots() {
1210   // we might be tempted to assert that:
1211   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1212   //        "inconsistent argument?");
1213   // However that wouldn't be right, because it's possible that
1214   // a safepoint is indeed in progress as a younger generation
1215   // stop-the-world GC happens even as we mark in this generation.
1216 
1217   _restart_for_overflow = false;
1218   force_overflow_conc()->init();


1228   // Parallel task terminator is set in "set_concurrency_and_phase()"
1229   set_concurrency_and_phase(active_workers, true /* concurrent */);
1230 
1231   CMConcurrentMarkingTask markingTask(this, cmThread());
1232   _parallel_workers->set_active_workers(active_workers);
1233   _parallel_workers->run_task(&markingTask);
1234   print_stats();
1235 }
1236 
1237 // Helper class to get rid of some boilerplate code.
1238 class G1CMTraceTime : public GCTraceTime {
1239   static bool doit_and_prepend(bool doit) {
1240     if (doit) {
1241       gclog_or_tty->put(' ');
1242     }
1243     return doit;
1244   }
1245 
1246  public:
1247   G1CMTraceTime(const char* title, bool doit)
1248     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
1249         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
1250   }
1251 };
1252 
1253 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1254   // world is stopped at this checkpoint
1255   assert(SafepointSynchronize::is_at_safepoint(),
1256          "world should be stopped");
1257 
1258   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1259 
1260   // If a full collection has happened, we shouldn't do this.
1261   if (has_aborted()) {
1262     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1263     return;
1264   }
1265 
1266   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1267 
1268   if (VerifyDuringGC) {
1269     HandleMark hm;  // handle scope


2374     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2375                                               g1h->workers(), active_workers);
2376     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2377 
2378     // Set the concurrency level. The phase was already set prior to
2379     // executing the remark task.
2380     set_concurrency(active_workers);
2381 
2382     // Set the degree of MT processing here.  If the discovery was done MT,
2383     // the number of threads involved during discovery could differ from
2384     // the number of active workers.  This is OK as long as the discovered
2385     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2386     rp->set_active_mt_degree(active_workers);
2387 
2388     // Process the weak references.
2389     const ReferenceProcessorStats& stats =
2390         rp->process_discovered_references(&g1_is_alive,
2391                                           &g1_keep_alive,
2392                                           &g1_drain_mark_stack,
2393                                           executor,
2394                                           g1h->gc_timer_cm(),
2395                                           concurrent_gc_id());
2396     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2397 
2398     // The do_oop work routines of the keep_alive and drain_marking_stack
2399     // oop closures will set the has_overflown flag if we overflow the
2400     // global marking stack.
2401 
2402     assert(_markStack.overflow() || _markStack.isEmpty(),
2403             "mark stack should be empty (unless it overflowed)");
2404 
2405     if (_markStack.overflow()) {
2406       // This should have been done already when we tried to push an
2407       // entry on to the global mark stack. But let's do it again.
2408       set_has_overflown();
2409     }
2410 
2411     assert(rp->num_q() == active_workers, "why not");
2412 
2413     rp->enqueue_discovered_references(executor);
2414 
2415     rp->verify_no_references_recorded();


2953     task_card_bm->clear();
2954   }
2955 }
2956 
2957 void ConcurrentMark::print_stats() {
2958   if (verbose_stats()) {
2959     gclog_or_tty->print_cr("---------------------------------------------------------------------");
2960     for (size_t i = 0; i < _active_tasks; ++i) {
2961       _tasks[i]->print_stats();
2962       gclog_or_tty->print_cr("---------------------------------------------------------------------");
2963     }
2964   }
2965 }
2966 
// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  _nextMarkBitMap->clearAll();

  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  // Drop each worker's region-scanning state so tasks restart cleanly.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  // Release any workers parked in the two overflow sync barriers.
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  // Capture the id of the aborted cycle before setting _has_aborted so that
  // concurrent_gc_id() can keep reporting it after the tracer moves on.
  _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
  assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
  _has_aborted = true;

  // Discard partially-filled SATB buffers and deactivate SATB queues.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);

  _g1h->trace_heap_after_concurrent_cycle();
  _g1h->register_concurrent_cycle_end();
}
3006 
3007 const GCId& ConcurrentMark::concurrent_gc_id() {
3008   if (has_aborted()) {
3009     return _aborted_gc_id;
3010   }
3011   return _g1h->gc_tracer_cm()->gc_id();
3012 }
3013 
// Print a one-line summary (count, total in seconds, average in ms) for the
// given time series, plus a std-dev/max line when there is any data.
// NOTE(review): times appear to be stored in ms (sum is divided by 1000.0 to
// print seconds) — confirm against the callers that populate these NumberSeqs.
static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}
3023 
3024 void ConcurrentMark::print_summary_info() {
3025   gclog_or_tty->print_cr(" Concurrent marking:");
3026   print_ms_time_info("  ", "init marks", _init_times);
3027   print_ms_time_info("  ", "remarks", _remark_times);
3028   {
3029     print_ms_time_info("     ", "final marks", _remark_mark_times);
3030     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3031 




  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ErgoVerbose.hpp"
  35 #include "gc/g1/g1Log.hpp"
  36 #include "gc/g1/g1OopClosures.inline.hpp"
  37 #include "gc/g1/g1RemSet.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionManager.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/g1/suspendibleThreadSet.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTrace.hpp"
  47 #include "gc/shared/gcTraceTime.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/taskqueue.inline.hpp"
  52 #include "gc/shared/vmGCOperations.hpp"
  53 #include "memory/allocation.hpp"
  54 #include "memory/resourceArea.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "runtime/atomic.inline.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/prefetch.inline.hpp"
  60 #include "services/memTracker.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 CMBitMapRO::CMBitMapRO(int shifter) :


 504   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 505   _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
 506             CardTableModRefBS::card_shift,
 507             false /* in_resource_area*/),
 508 
 509   _prevMarkBitMap(&_markBitMap1),
 510   _nextMarkBitMap(&_markBitMap2),
 511 
 512   _markStack(this),
 513   // _finger set in set_non_marking_state
 514 
 515   _max_worker_id(ParallelGCThreads),
 516   // _active_tasks set in set_non_marking_state
 517   // _tasks set inside the constructor
 518   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 519   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 520 
 521   _has_overflown(false),
 522   _concurrent(false),
 523   _has_aborted(false),

 524   _restart_for_overflow(false),
 525   _concurrent_marking_in_progress(false),
 526 
 527   // _verbose_level set below
 528 
 529   _init_times(),
 530   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 531   _cleanup_times(),
 532   _total_counting_time(0.0),
 533   _total_rs_scrub_time(0.0),
 534 
 535   _parallel_workers(NULL),
 536 
 537   _count_card_bitmaps(NULL),
 538   _count_marked_bytes(NULL),
 539   _completed_initialization(false) {
 540   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 541   if (verbose_level < no_verbose) {
 542     verbose_level = no_verbose;
 543   }


 974   }
 975 
 976   // If we're executing the concurrent phase of marking, reset the marking
 977   // state; otherwise the marking state is reset after reference processing,
 978   // during the remark pause.
 979   // If we reset here as a result of an overflow during the remark we will
 980   // see assertion failures from any subsequent set_concurrency_and_phase()
 981   // calls.
 982   if (concurrent()) {
 983     // let the task associated with with worker 0 do this
 984     if (worker_id == 0) {
 985       // task 0 is responsible for clearing the global data structures
 986       // We should be here because of an overflow. During STW we should
 987       // not clear the overflow flag since we rely on it being true when
 988       // we exit this method to abort the pause and restart concurrent
 989       // marking.
 990       reset_marking_state(true /* clear_overflow */);
 991       force_overflow()->update();
 992 
 993       if (G1Log::fine()) {
 994         gclog_or_tty->gclog_stamp();
 995         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
 996       }
 997     }
 998   }
 999 
1000   // after this, each task should reset its own data structures then
1001   // then go into the second barrier
1002 }
1003 
1004 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1005   bool barrier_aborted;
1006 
1007   if (verbose_low()) {
1008     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1009   }
1010 
1011   {
1012     SuspendibleThreadSetLeaver sts_leave(concurrent());
1013     barrier_aborted = !_second_overflow_barrier_sync.enter();
1014   }


1164     CMRootRegions* root_regions = _cm->root_regions();
1165     HeapRegion* hr = root_regions->claim_next();
1166     while (hr != NULL) {
1167       _cm->scanRootRegion(hr, worker_id);
1168       hr = root_regions->claim_next();
1169     }
1170   }
1171 };
1172 
1173 void ConcurrentMark::scanRootRegions() {
1174   double scan_start = os::elapsedTime();
1175 
1176   // Start of concurrent marking.
1177   ClassLoaderDataGraph::clear_claimed_marks();
1178 
1179   // scan_in_progress() will have been set to true only if there was
1180   // at least one root region to scan. So, if it's false, we
1181   // should not attempt to do any further work.
1182   if (root_regions()->scan_in_progress()) {
1183     if (G1Log::fine()) {
1184       gclog_or_tty->gclog_stamp();
1185       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
1186     }
1187 
1188     _parallel_marking_threads = calc_parallel_marking_threads();
1189     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1190            "Maximum number of marking threads exceeded");
1191     uint active_workers = MAX2(1U, parallel_marking_threads());
1192 
1193     CMRootRegionScanTask task(this);
1194     _parallel_workers->set_active_workers(active_workers);
1195     _parallel_workers->run_task(&task);
1196 
1197     if (G1Log::fine()) {
1198       gclog_or_tty->gclog_stamp();
1199       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
1200     }
1201 
1202     // It's possible that has_aborted() is true here without actually
1203     // aborting the survivor scan earlier. This is OK as it's
1204     // mainly used for sanity checking.
1205     root_regions()->scan_finished();
1206   }
1207 }
1208 
1209 void ConcurrentMark::markFromRoots() {
1210   // we might be tempted to assert that:
1211   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1212   //        "inconsistent argument?");
1213   // However that wouldn't be right, because it's possible that
1214   // a safepoint is indeed in progress as a younger generation
1215   // stop-the-world GC happens even as we mark in this generation.
1216 
1217   _restart_for_overflow = false;
1218   force_overflow_conc()->init();


1228   // Parallel task terminator is set in "set_concurrency_and_phase()"
1229   set_concurrency_and_phase(active_workers, true /* concurrent */);
1230 
1231   CMConcurrentMarkingTask markingTask(this, cmThread());
1232   _parallel_workers->set_active_workers(active_workers);
1233   _parallel_workers->run_task(&markingTask);
1234   print_stats();
1235 }
1236 
1237 // Helper class to get rid of some boilerplate code.
1238 class G1CMTraceTime : public GCTraceTime {
1239   static bool doit_and_prepend(bool doit) {
1240     if (doit) {
1241       gclog_or_tty->put(' ');
1242     }
1243     return doit;
1244   }
1245 
1246  public:
1247   G1CMTraceTime(const char* title, bool doit)
1248     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {

1249   }
1250 };
1251 
1252 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1253   // world is stopped at this checkpoint
1254   assert(SafepointSynchronize::is_at_safepoint(),
1255          "world should be stopped");
1256 
1257   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1258 
1259   // If a full collection has happened, we shouldn't do this.
1260   if (has_aborted()) {
1261     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1262     return;
1263   }
1264 
1265   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1266 
1267   if (VerifyDuringGC) {
1268     HandleMark hm;  // handle scope


2373     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2374                                               g1h->workers(), active_workers);
2375     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2376 
2377     // Set the concurrency level. The phase was already set prior to
2378     // executing the remark task.
2379     set_concurrency(active_workers);
2380 
2381     // Set the degree of MT processing here.  If the discovery was done MT,
2382     // the number of threads involved during discovery could differ from
2383     // the number of active workers.  This is OK as long as the discovered
2384     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2385     rp->set_active_mt_degree(active_workers);
2386 
2387     // Process the weak references.
2388     const ReferenceProcessorStats& stats =
2389         rp->process_discovered_references(&g1_is_alive,
2390                                           &g1_keep_alive,
2391                                           &g1_drain_mark_stack,
2392                                           executor,
2393                                           g1h->gc_timer_cm());

2394     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2395 
2396     // The do_oop work routines of the keep_alive and drain_marking_stack
2397     // oop closures will set the has_overflown flag if we overflow the
2398     // global marking stack.
2399 
2400     assert(_markStack.overflow() || _markStack.isEmpty(),
2401             "mark stack should be empty (unless it overflowed)");
2402 
2403     if (_markStack.overflow()) {
2404       // This should have been done already when we tried to push an
2405       // entry on to the global mark stack. But let's do it again.
2406       set_has_overflown();
2407     }
2408 
2409     assert(rp->num_q() == active_workers, "why not");
2410 
2411     rp->enqueue_discovered_references(executor);
2412 
2413     rp->verify_no_references_recorded();


2951     task_card_bm->clear();
2952   }
2953 }
2954 
2955 void ConcurrentMark::print_stats() {
2956   if (verbose_stats()) {
2957     gclog_or_tty->print_cr("---------------------------------------------------------------------");
2958     for (size_t i = 0; i < _active_tasks; ++i) {
2959       _tasks[i]->print_stats();
2960       gclog_or_tty->print_cr("---------------------------------------------------------------------");
2961     }
2962   }
2963 }
2964 
// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  _nextMarkBitMap->clearAll();

  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  // Drop each worker's region-scanning state so tasks restart cleanly.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  // Release any workers parked in the two overflow sync barriers.
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();

  _has_aborted = true;

  // Discard partially-filled SATB buffers and deactivate SATB queues.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);

  _g1h->trace_heap_after_concurrent_cycle();
  _g1h->register_concurrent_cycle_end();
}
3001 
// Print a one-line summary (count, total in seconds, average in ms) for the
// given time series, plus a std-dev/max line when there is any data.
// NOTE(review): times appear to be stored in ms (sum is divided by 1000.0 to
// print seconds) — confirm against the callers that populate these NumberSeqs.
static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}
3011 
3012 void ConcurrentMark::print_summary_info() {
3013   gclog_or_tty->print_cr(" Concurrent marking:");
3014   print_ms_time_info("  ", "init marks", _init_times);
3015   print_ms_time_info("  ", "remarks", _remark_times);
3016   {
3017     print_ms_time_info("     ", "final marks", _remark_mark_times);
3018     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3019 


< prev index next >