
src/share/vm/gc/g1/concurrentMark.cpp

Old version:

  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ErgoVerbose.hpp"
  35 #include "gc/g1/g1Log.hpp"
  36 #include "gc/g1/g1OopClosures.inline.hpp"
  37 #include "gc/g1/g1RemSet.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionManager.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/g1/suspendibleThreadSet.hpp"

  44 #include "gc/shared/gcTimer.hpp"
  45 #include "gc/shared/gcTrace.hpp"
  46 #include "gc/shared/gcTraceTime.hpp"
  47 #include "gc/shared/genOopClosures.inline.hpp"
  48 #include "gc/shared/referencePolicy.hpp"
  49 #include "gc/shared/strongRootsScope.hpp"
  50 #include "gc/shared/taskqueue.inline.hpp"
  51 #include "gc/shared/vmGCOperations.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.inline.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 
  61 // Concurrent marking bit map wrapper
  62 
  63 CMBitMapRO::CMBitMapRO(int shifter) :


 503   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 504   _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
 505             CardTableModRefBS::card_shift,
 506             false /* in_resource_area*/),
 507 
 508   _prevMarkBitMap(&_markBitMap1),
 509   _nextMarkBitMap(&_markBitMap2),
 510 
 511   _markStack(this),
 512   // _finger set in set_non_marking_state
 513 
 514   _max_worker_id(ParallelGCThreads),
 515   // _active_tasks set in set_non_marking_state
 516   // _tasks set inside the constructor
 517   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 518   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 519 
 520   _has_overflown(false),
 521   _concurrent(false),
 522   _has_aborted(false),
 523   _aborted_gc_id(GCId::undefined()),
 524   _restart_for_overflow(false),
 525   _concurrent_marking_in_progress(false),
 526 
 527   // _verbose_level set below
 528 
 529   _init_times(),
 530   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 531   _cleanup_times(),
 532   _total_counting_time(0.0),
 533   _total_rs_scrub_time(0.0),
 534 
 535   _parallel_workers(NULL),
 536 
 537   _count_card_bitmaps(NULL),
 538   _count_marked_bytes(NULL),
 539   _completed_initialization(false) {
 540   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 541   if (verbose_level < no_verbose) {
 542     verbose_level = no_verbose;
 543   }
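
A note on the excerpt above: the constructor coerces the user-settable
G1MarkingVerboseLevel flag into the CMVerboseLevel range (the matching
upper-bound clamp presumably follows just past this cut). A minimal,
self-contained sketch of that flag-clamping idiom, with enum values
assumed rather than taken from this excerpt:

  enum CMVerboseLevel { no_verbose = 0, low_verbose, medium_verbose, high_verbose };

  // Clamp a raw integer flag into the closed enum range up front, so an
  // out-of-range -XX value degrades gracefully instead of misbehaving later.
  static CMVerboseLevel clamp_verbose_level(int raw) {
    if (raw < no_verbose)   return no_verbose;
    if (raw > high_verbose) return high_verbose;
    return static_cast<CMVerboseLevel>(raw);
  }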


 974   }
 975 
 976   // If we're executing the concurrent phase of marking, reset the marking
 977   // state; otherwise the marking state is reset after reference processing,
 978   // during the remark pause.
 979   // If we reset here as a result of an overflow during the remark we will
 980   // see assertion failures from any subsequent set_concurrency_and_phase()
 981   // calls.
 982   if (concurrent()) {
 983     // let the task associated with worker 0 do this
 984     if (worker_id == 0) {
 985       // task 0 is responsible for clearing the global data structures
 986       // We should be here because of an overflow. During STW we should
 987       // not clear the overflow flag since we rely on it being true when
 988       // we exit this method to abort the pause and restart concurrent
 989       // marking.
 990       reset_marking_state(true /* clear_overflow */);
 991       force_overflow()->update();
 992 
 993       if (G1Log::fine()) {
 994         gclog_or_tty->gclog_stamp(concurrent_gc_id());
 995         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
 996       }
 997     }
 998   }
 999 
1000   // after this, each task should reset its own data structures and
1001   // then go into the second barrier
1002 }
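
The hunk above shows a division of labor that recurs in HotSpot's parallel
phases: after an overflow, exactly one worker (worker 0) resets the shared
marking state between two synchronization barriers, and every worker then
resets its own task-local state. A sketch of that shape using standard
C++20 threads instead of HotSpot's work gang (all names illustrative):

  #include <atomic>
  #include <barrier>
  #include <cstdio>

  struct SharedMarkingState {
    std::atomic<bool> overflown{true};
    void reset(bool clear_overflow) {
      if (clear_overflow) overflown.store(false);
      // ... clear the global mark stack, finger, etc.
    }
  };

  // 'sync' is constructed elsewhere as std::barrier<> sync(num_workers).
  void handle_overflow(unsigned worker_id, SharedMarkingState& state,
                       std::barrier<>& sync) {
    sync.arrive_and_wait();                    // first barrier: all workers quiesced
    if (worker_id == 0) {
      state.reset(true /* clear_overflow */);  // only worker 0 touches global data
      std::printf("[%u] global marking state reset\n", worker_id);
    }
    sync.arrive_and_wait();                    // second barrier: reset visible to all
    // ... each worker now clears its own task-local structures
  }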
1003 
1004 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1005   bool barrier_aborted;
1006 
1007   if (verbose_low()) {
1008     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1009   }
1010 
1011   {
1012     SuspendibleThreadSetLeaver sts_leave(concurrent());
1013     barrier_aborted = !_second_overflow_barrier_sync.enter();
1014   }
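
SuspendibleThreadSetLeaver is a scope guard: while a concurrent worker
blocks on the barrier it must leave the suspendible thread set so a
safepoint need not wait for it, and it rejoins when the scope ends. The
constructor argument (here concurrent()) makes the guard a no-op in the
stop-the-world case. A sketch of the idiom with stand-in leave/join
functions, not the real SuspendibleThreadSet API:

  #include <cstdio>

  static void sts_leave() { std::printf("left suspendible set\n"); }     // stand-in
  static void sts_join()  { std::printf("rejoined suspendible set\n"); } // stand-in

  class ThreadSetLeaver {
    bool _active;
   public:
    explicit ThreadSetLeaver(bool active) : _active(active) {
      if (_active) sts_leave();   // leave only in the concurrent phase
    }
    ~ThreadSetLeaver() {
      if (_active) sts_join();    // automatic rejoin, even on early return
    }
    ThreadSetLeaver(const ThreadSetLeaver&) = delete;            // scope guards
    ThreadSetLeaver& operator=(const ThreadSetLeaver&) = delete; // don't copy
  };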


1164     CMRootRegions* root_regions = _cm->root_regions();
1165     HeapRegion* hr = root_regions->claim_next();
1166     while (hr != NULL) {
1167       _cm->scanRootRegion(hr, worker_id);
1168       hr = root_regions->claim_next();
1169     }
1170   }
1171 };
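
The loop above distributes root regions by having each worker repeatedly
claim the next unclaimed region until claim_next() returns NULL, so
claim_next() must be safe to call from several workers at once. A minimal
sketch of such a claim protocol over a fixed array, using an atomic
cursor (Region and the class are stand-ins, not the CMRootRegions API):

  #include <atomic>
  #include <cstddef>

  struct Region { /* ... */ };

  class RootRegionList {
    Region** _regions;              // fixed array of claimable regions
    size_t _count;
    std::atomic<size_t> _cursor{0}; // index of the next unclaimed region
   public:
    RootRegionList(Region** regions, size_t count)
      : _regions(regions), _count(count) {}

    // Atomically claim one region per call; nullptr once exhausted.
    // fetch_add hands each caller a distinct index, so no region is
    // scanned twice and no locking is needed.
    Region* claim_next() {
      size_t i = _cursor.fetch_add(1, std::memory_order_relaxed);
      return (i < _count) ? _regions[i] : nullptr;
    }
  };

The worker loop then has exactly the shape of the hunk above:
while (Region* r = list.claim_next()) { scan(r); }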
1172 
1173 void ConcurrentMark::scanRootRegions() {
1174   double scan_start = os::elapsedTime();
1175 
1176   // Start of concurrent marking.
1177   ClassLoaderDataGraph::clear_claimed_marks();
1178 
1179   // scan_in_progress() will have been set to true only if there was
1180   // at least one root region to scan. So, if it's false, we
1181   // should not attempt to do any further work.
1182   if (root_regions()->scan_in_progress()) {
1183     if (G1Log::fine()) {
1184       gclog_or_tty->gclog_stamp(concurrent_gc_id());
1185       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
1186     }
1187 
1188     _parallel_marking_threads = calc_parallel_marking_threads();
1189     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1190            "Maximum number of marking threads exceeded");
1191     uint active_workers = MAX2(1U, parallel_marking_threads());
1192 
1193     CMRootRegionScanTask task(this);
1194     _parallel_workers->set_active_workers(active_workers);
1195     _parallel_workers->run_task(&task);
1196 
1197     if (G1Log::fine()) {
1198       gclog_or_tty->gclog_stamp(concurrent_gc_id());
1199       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
1200     }
1201 
1202     // It's possible that has_aborted() is true here without actually
1203     // aborting the survivor scan earlier. This is OK as it's
1204     // mainly used for sanity checking.
1205     root_regions()->scan_finished();
1206   }
1207 }
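
scanRootRegions() brackets the parallel work with matched start/end log
lines and reports the wall-clock time between them. That bracketing can
be factored into a small helper; this sketch uses <chrono> in place of
os::elapsedTime() and is illustrative only:

  #include <chrono>
  #include <cstdio>

  template <typename Work>
  void log_phase(const char* name, Work work) {
    auto start = std::chrono::steady_clock::now();
    std::printf("[GC %s-start]\n", name);
    work();                                       // run the bracketed phase
    std::chrono::duration<double> secs =
        std::chrono::steady_clock::now() - start;
    std::printf("[GC %s-end, %1.7lf secs]\n", name, secs.count());
  }

  // Usage, mirroring the hunk above:
  //   log_phase("concurrent-root-region-scan", [&] { run_scan_task(); });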
1208 
1209 void ConcurrentMark::markFromRoots() {
1210   // we might be tempted to assert that:
1211   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1212   //        "inconsistent argument?");
1213   // However that wouldn't be right, because it's possible that
1214   // a safepoint is indeed in progress as a younger generation
1215   // stop-the-world GC happens even as we mark in this generation.
1216 
1217   _restart_for_overflow = false;
1218   force_overflow_conc()->init();


1229   set_concurrency_and_phase(active_workers, true /* concurrent */);
1230 
1231   CMConcurrentMarkingTask markingTask(this, cmThread());
1232   _parallel_workers->set_active_workers(active_workers);
1233   _parallel_workers->run_task(&markingTask);
1234   print_stats();
1235 }
1236 
1237 // Helper class to get rid of some boilerplate code.
1238 class G1CMTraceTime : public StackObj {
1239   GCTraceTimeImpl _gc_trace_time;
1240   static bool doit_and_prepend(bool doit) {
1241     if (doit) {
1242       gclog_or_tty->put(' ');
1243     }
1244     return doit;
1245   }
1246 
1247  public:
1248   G1CMTraceTime(const char* title, bool doit)
1249     : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
1250         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
1251   }
1252 };
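
G1CMTraceTime exploits the fact that a mem-initializer's arguments are
evaluated before the member's constructor runs: doit_and_prepend() prints
its space strictly before GCTraceTimeImpl prints the title. A distilled
sketch of that ordering trick (TraceTime and ScopedTrace are illustrative
stand-ins):

  #include <cstdio>

  struct TraceTime {
    TraceTime(const char* title, bool doit) {
      if (doit) std::printf("[%s", title);  // constructor prints the title
    }
  };

  class ScopedTrace {
    TraceTime _t;
    static bool prepend_space(bool doit) {
      if (doit) std::putchar(' ');  // side effect happens first...
      return doit;                  // ...then the value reaches TraceTime
    }
   public:
    ScopedTrace(const char* title, bool doit)
      : _t(title, prepend_space(doit)) {}  // helper runs before _t's ctor
  };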
1253 
1254 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1255   // world is stopped at this checkpoint
1256   assert(SafepointSynchronize::is_at_safepoint(),
1257          "world should be stopped");
1258 
1259   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1260 
1261   // If a full collection has happened, we shouldn't do this.
1262   if (has_aborted()) {
1263     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1264     return;
1265   }
1266 
1267   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1268 
1269   if (VerifyDuringGC) {
1270     HandleMark hm;  // handle scope


2375     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2376                                               g1h->workers(), active_workers);
2377     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2378 
2379     // Set the concurrency level. The phase was already set prior to
2380     // executing the remark task.
2381     set_concurrency(active_workers);
2382 
2383     // Set the degree of MT processing here.  If the discovery was done MT,
2384     // the number of threads involved during discovery could differ from
2385     // the number of active workers.  This is OK as long as the discovered
2386     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2387     rp->set_active_mt_degree(active_workers);
2388 
2389     // Process the weak references.
2390     const ReferenceProcessorStats& stats =
2391         rp->process_discovered_references(&g1_is_alive,
2392                                           &g1_keep_alive,
2393                                           &g1_drain_mark_stack,
2394                                           executor,
2395                                           g1h->gc_timer_cm(),
2396                                           concurrent_gc_id());
2397     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2398 
2399     // The do_oop work routines of the keep_alive and drain_marking_stack
2400     // oop closures will set the has_overflown flag if we overflow the
2401     // global marking stack.
2402 
2403     assert(_markStack.overflow() || _markStack.isEmpty(),
2404             "mark stack should be empty (unless it overflowed)");
2405 
2406     if (_markStack.overflow()) {
2407       // This should have been done already when we tried to push an
 2408       // entry onto the global mark stack. But let's do it again.
2409       set_has_overflown();
2410     }
2411 
 2412     assert(rp->num_q() == active_workers, "Number of reference queues should match active workers");
2413 
2414     rp->enqueue_discovered_references(executor);
2415 
2416     rp->verify_no_references_recorded();


2972     return;
2973   }
2974 
2975   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2976   // concurrent bitmap clearing.
2977   _nextMarkBitMap->clearAll();
2978 
2979   // Note we cannot clear the previous marking bitmap here
2980   // since VerifyDuringGC verifies the objects marked during
2981   // a full GC against the previous bitmap.
2982 
2983   // Clear the liveness counting data
2984   clear_all_count_data();
2985   // Empty mark stack
2986   reset_marking_state();
2987   for (uint i = 0; i < _max_worker_id; ++i) {
2988     _tasks[i]->clear_region_fields();
2989   }
2990   _first_overflow_barrier_sync.abort();
2991   _second_overflow_barrier_sync.abort();
2992   _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
2993   assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
2994   _has_aborted = true;
2995 
2996   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2997   satb_mq_set.abandon_partial_marking();
 2998   // This can be called either during or outside marking; we'll read
2999   // the expected_active value from the SATB queue set.
3000   satb_mq_set.set_active_all_threads(
3001                                  false, /* new active value */
3002                                  satb_mq_set.is_active() /* expected_active */);
3003 
3004   _g1h->trace_heap_after_concurrent_cycle();
3005   _g1h->register_concurrent_cycle_end();
3006 }
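
Note the expected_active handshake when the SATB queues are flipped: the
caller states what it believes the current global state to be, and the
setter can then verify that every per-thread queue agrees before flipping.
A sketch of that pattern (illustrative types, not the SATBMarkQueueSet
API):

  #include <cassert>
  #include <vector>

  struct Queue { bool active; };

  class QueueSet {
    std::vector<Queue*> _queues;   // one queue per mutator thread
    bool _all_active = false;
   public:
    void add(Queue* q) { _queues.push_back(q); }
    bool is_active() const { return _all_active; }

    void set_active_all_threads(bool new_active, bool expected_active) {
      assert(_all_active == expected_active && "caller's view is stale");
      _all_active = new_active;
      for (Queue* q : _queues) {
        assert(q->active == expected_active && "per-thread queue out of sync");
        q->active = new_active;
      }
    }
  };

Passing is_active() as expected_active, as abort() does above, makes the
call valid whether or not marking is currently active.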
3007 
3008 const GCId& ConcurrentMark::concurrent_gc_id() {
3009   if (has_aborted()) {
3010     return _aborted_gc_id;
3011   }
3012   return _g1h->gc_tracer_cm()->gc_id();
3013 }
3014 
3015 static void print_ms_time_info(const char* prefix, const char* name,
3016                                NumberSeq& ns) {
3017   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3018                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3019   if (ns.num() > 0) {
3020     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3021                            prefix, ns.sd(), ns.maximum());
3022   }
3023 }
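
print_ms_time_info() needs only count, sum, average, maximum and standard
deviation from the NumberSeq it is given. A minimal sketch of a running
accumulator that could back such a printout, using Welford's online
algorithm for the variance (illustrative, not HotSpot's NumberSeq):

  #include <algorithm>
  #include <cmath>

  class RunningStats {
    long _n = 0;
    double _sum = 0.0, _max = 0.0, _mean = 0.0, _m2 = 0.0;
   public:
    void add(double ms) {          // samples are non-negative millisecond times
      _n++; _sum += ms; _max = std::max(_max, ms);
      double d = ms - _mean;
      _mean += d / _n;             // Welford running mean...
      _m2   += d * (ms - _mean);   // ...and sum of squared deviations
    }
    long   num()     const { return _n; }
    double sum()     const { return _sum; }  // ms; caller divides by 1000 for s
    double avg()     const { return _n > 0 ? _mean : 0.0; }
    double maximum() const { return _max; }
    double sd()      const { return _n > 1 ? std::sqrt(_m2 / (_n - 1)) : 0.0; }
  };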
3024 
3025 void ConcurrentMark::print_summary_info() {
3026   gclog_or_tty->print_cr(" Concurrent marking:");
3027   print_ms_time_info("  ", "init marks", _init_times);
3028   print_ms_time_info("  ", "remarks", _remark_times);
3029   {
3030     print_ms_time_info("     ", "final marks", _remark_mark_times);
3031     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3032 

New version:

  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ErgoVerbose.hpp"
  35 #include "gc/g1/g1Log.hpp"
  36 #include "gc/g1/g1OopClosures.inline.hpp"
  37 #include "gc/g1/g1RemSet.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionManager.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/g1/suspendibleThreadSet.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTrace.hpp"
  47 #include "gc/shared/gcTraceTime.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/taskqueue.inline.hpp"
  52 #include "gc/shared/vmGCOperations.hpp"
  53 #include "memory/allocation.hpp"
  54 #include "memory/resourceArea.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "runtime/atomic.inline.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/prefetch.inline.hpp"
  60 #include "services/memTracker.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 CMBitMapRO::CMBitMapRO(int shifter) :


 504   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 505   _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
 506             CardTableModRefBS::card_shift,
 507             false /* in_resource_area*/),
 508 
 509   _prevMarkBitMap(&_markBitMap1),
 510   _nextMarkBitMap(&_markBitMap2),
 511 
 512   _markStack(this),
 513   // _finger set in set_non_marking_state
 514 
 515   _max_worker_id(ParallelGCThreads),
 516   // _active_tasks set in set_non_marking_state
 517   // _tasks set inside the constructor
 518   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 519   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 520 
 521   _has_overflown(false),
 522   _concurrent(false),
 523   _has_aborted(false),

 524   _restart_for_overflow(false),
 525   _concurrent_marking_in_progress(false),
 526 
 527   // _verbose_level set below
 528 
 529   _init_times(),
 530   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 531   _cleanup_times(),
 532   _total_counting_time(0.0),
 533   _total_rs_scrub_time(0.0),
 534 
 535   _parallel_workers(NULL),
 536 
 537   _count_card_bitmaps(NULL),
 538   _count_marked_bytes(NULL),
 539   _completed_initialization(false) {
 540   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 541   if (verbose_level < no_verbose) {
 542     verbose_level = no_verbose;
 543   }


 974   }
 975 
 976   // If we're executing the concurrent phase of marking, reset the marking
 977   // state; otherwise the marking state is reset after reference processing,
 978   // during the remark pause.
 979   // If we reset here as a result of an overflow during the remark we will
 980   // see assertion failures from any subsequent set_concurrency_and_phase()
 981   // calls.
 982   if (concurrent()) {
 983     // let the task associated with worker 0 do this
 984     if (worker_id == 0) {
 985       // task 0 is responsible for clearing the global data structures
 986       // We should be here because of an overflow. During STW we should
 987       // not clear the overflow flag since we rely on it being true when
 988       // we exit this method to abort the pause and restart concurrent
 989       // marking.
 990       reset_marking_state(true /* clear_overflow */);
 991       force_overflow()->update();
 992 
 993       if (G1Log::fine()) {
 994         gclog_or_tty->gclog_stamp();
 995         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
 996       }
 997     }
 998   }
 999 
1000   // after this, each task should reset its own data structures and
1001   // then go into the second barrier
1002 }
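
Relative to the old version, gclog_stamp() is now called without an
explicit GCId and the file gains a gc/shared/gcId.hpp include: the patch
stops threading the id through each call site. One way a no-argument
stamp can work is to scope the current id in thread-local state for the
duration of a GC; the following is a sketch of that idea only, not the
actual HotSpot GCId implementation:

  #include <cstdio>

  static const unsigned UNDEFINED_GC_ID = ~0u;
  thread_local unsigned current_gc_id = UNDEFINED_GC_ID;

  // RAII mark: make 'id' the current GC id for the enclosing scope.
  struct GCIdMark {
    unsigned _saved;
    explicit GCIdMark(unsigned id) : _saved(current_gc_id) { current_gc_id = id; }
    ~GCIdMark() { current_gc_id = _saved; }
  };

  // No id parameter needed: the stamp reads thread-local state.
  void gclog_stamp() {
    if (current_gc_id != UNDEFINED_GC_ID) {
      std::printf("GC(%u) ", current_gc_id);
    }
  }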
1003 
1004 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1005   bool barrier_aborted;
1006 
1007   if (verbose_low()) {
1008     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1009   }
1010 
1011   {
1012     SuspendibleThreadSetLeaver sts_leave(concurrent());
1013     barrier_aborted = !_second_overflow_barrier_sync.enter();
1014   }


1164     CMRootRegions* root_regions = _cm->root_regions();
1165     HeapRegion* hr = root_regions->claim_next();
1166     while (hr != NULL) {
1167       _cm->scanRootRegion(hr, worker_id);
1168       hr = root_regions->claim_next();
1169     }
1170   }
1171 };
1172 
1173 void ConcurrentMark::scanRootRegions() {
1174   double scan_start = os::elapsedTime();
1175 
1176   // Start of concurrent marking.
1177   ClassLoaderDataGraph::clear_claimed_marks();
1178 
1179   // scan_in_progress() will have been set to true only if there was
1180   // at least one root region to scan. So, if it's false, we
1181   // should not attempt to do any further work.
1182   if (root_regions()->scan_in_progress()) {
1183     if (G1Log::fine()) {
1184       gclog_or_tty->gclog_stamp();
1185       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
1186     }
1187 
1188     _parallel_marking_threads = calc_parallel_marking_threads();
1189     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1190            "Maximum number of marking threads exceeded");
1191     uint active_workers = MAX2(1U, parallel_marking_threads());
1192 
1193     CMRootRegionScanTask task(this);
1194     _parallel_workers->set_active_workers(active_workers);
1195     _parallel_workers->run_task(&task);
1196 
1197     if (G1Log::fine()) {
1198       gclog_or_tty->gclog_stamp();
1199       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
1200     }
1201 
1202     // It's possible that has_aborted() is true here without actually
1203     // aborting the survivor scan earlier. This is OK as it's
1204     // mainly used for sanity checking.
1205     root_regions()->scan_finished();
1206   }
1207 }
1208 
1209 void ConcurrentMark::markFromRoots() {
1210   // we might be tempted to assert that:
1211   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1212   //        "inconsistent argument?");
1213   // However that wouldn't be right, because it's possible that
1214   // a safepoint is indeed in progress as a younger generation
1215   // stop-the-world GC happens even as we mark in this generation.
1216 
1217   _restart_for_overflow = false;
1218   force_overflow_conc()->init();


1229   set_concurrency_and_phase(active_workers, true /* concurrent */);
1230 
1231   CMConcurrentMarkingTask markingTask(this, cmThread());
1232   _parallel_workers->set_active_workers(active_workers);
1233   _parallel_workers->run_task(&markingTask);
1234   print_stats();
1235 }
1236 
1237 // Helper class to get rid of some boilerplate code.
1238 class G1CMTraceTime : public StackObj {
1239   GCTraceTimeImpl _gc_trace_time;
1240   static bool doit_and_prepend(bool doit) {
1241     if (doit) {
1242       gclog_or_tty->put(' ');
1243     }
1244     return doit;
1245   }
1246 
1247  public:
1248   G1CMTraceTime(const char* title, bool doit)
1249     : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {

1250   }
1251 };
1252 
1253 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1254   // world is stopped at this checkpoint
1255   assert(SafepointSynchronize::is_at_safepoint(),
1256          "world should be stopped");
1257 
1258   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1259 
1260   // If a full collection has happened, we shouldn't do this.
1261   if (has_aborted()) {
1262     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1263     return;
1264   }
1265 
1266   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1267 
1268   if (VerifyDuringGC) {
1269     HandleMark hm;  // handle scope


2374     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2375                                               g1h->workers(), active_workers);
2376     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2377 
2378     // Set the concurrency level. The phase was already set prior to
2379     // executing the remark task.
2380     set_concurrency(active_workers);
2381 
2382     // Set the degree of MT processing here.  If the discovery was done MT,
2383     // the number of threads involved during discovery could differ from
2384     // the number of active workers.  This is OK as long as the discovered
2385     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2386     rp->set_active_mt_degree(active_workers);
2387 
2388     // Process the weak references.
2389     const ReferenceProcessorStats& stats =
2390         rp->process_discovered_references(&g1_is_alive,
2391                                           &g1_keep_alive,
2392                                           &g1_drain_mark_stack,
2393                                           executor,
2394                                           g1h->gc_timer_cm());

2395     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2396 
2397     // The do_oop work routines of the keep_alive and drain_marking_stack
2398     // oop closures will set the has_overflown flag if we overflow the
2399     // global marking stack.
2400 
2401     assert(_markStack.overflow() || _markStack.isEmpty(),
2402             "mark stack should be empty (unless it overflowed)");
2403 
2404     if (_markStack.overflow()) {
2405       // This should have been done already when we tried to push an
 2406       // entry onto the global mark stack. But let's do it again.
2407       set_has_overflown();
2408     }
2409 
 2410     assert(rp->num_q() == active_workers, "Number of reference queues should match active workers");
2411 
2412     rp->enqueue_discovered_references(executor);
2413 
2414     rp->verify_no_references_recorded();


2970     return;
2971   }
2972 
2973   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2974   // concurrent bitmap clearing.
2975   _nextMarkBitMap->clearAll();
2976 
2977   // Note we cannot clear the previous marking bitmap here
2978   // since VerifyDuringGC verifies the objects marked during
2979   // a full GC against the previous bitmap.
2980 
2981   // Clear the liveness counting data
2982   clear_all_count_data();
2983   // Empty mark stack
2984   reset_marking_state();
2985   for (uint i = 0; i < _max_worker_id; ++i) {
2986     _tasks[i]->clear_region_fields();
2987   }
2988   _first_overflow_barrier_sync.abort();
2989   _second_overflow_barrier_sync.abort();


2990   _has_aborted = true;
2991 
2992   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2993   satb_mq_set.abandon_partial_marking();
 2994   // This can be called either during or outside marking; we'll read
2995   // the expected_active value from the SATB queue set.
2996   satb_mq_set.set_active_all_threads(
2997                                  false, /* new active value */
2998                                  satb_mq_set.is_active() /* expected_active */);
2999 
3000   _g1h->trace_heap_after_concurrent_cycle();
3001   _g1h->register_concurrent_cycle_end();
3002 }
3003 
3004 static void print_ms_time_info(const char* prefix, const char* name,
3005                                NumberSeq& ns) {
3006   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3007                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3008   if (ns.num() > 0) {
3009     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3010                            prefix, ns.sd(), ns.maximum());
3011   }
3012 }
3013 
3014 void ConcurrentMark::print_summary_info() {
3015   gclog_or_tty->print_cr(" Concurrent marking:");
3016   print_ms_time_info("  ", "init marks", _init_times);
3017   print_ms_time_info("  ", "remarks", _remark_times);
3018   {
3019     print_ms_time_info("     ", "final marks", _remark_mark_times);
3020     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3021 

