
src/share/vm/gc/g1/g1ConcurrentMark.cpp

rev 10384 : [mq]: step.00
rev 10385 : [mq]: step.01


 424             CardTableModRefBS::card_shift,
 425             false /* in_resource_area*/),
 426 
 427   _prevMarkBitMap(&_markBitMap1),
 428   _nextMarkBitMap(&_markBitMap2),
 429 
 430   _markStack(this),
 431   // _finger set in set_non_marking_state
 432 
 433   _max_worker_id(ParallelGCThreads),
 434   // _active_tasks set in set_non_marking_state
 435   // _tasks set inside the constructor
 436   _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
 437   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 438 
 439   _has_overflown(false),
 440   _concurrent(false),
 441   _has_aborted(false),
 442   _restart_for_overflow(false),
 443   _concurrent_marking_in_progress(false),
 444   _concurrent_phase_status(ConcPhaseNotStarted),
 445 
 446   // _verbose_level set below
 447 
 448   _init_times(),
 449   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 450   _cleanup_times(),
 451   _total_counting_time(0.0),
 452   _total_rs_scrub_time(0.0),
 453 
 454   _parallel_workers(NULL),
 455 
 456   _count_card_bitmaps(NULL),
 457   _count_marked_bytes(NULL),
 458   _completed_initialization(false) {
 459 
 460   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 461   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 462 
 463   // Create & start a ConcurrentMark thread.
 464   _cmThread = new ConcurrentMarkThread(this);


 990   if (root_regions()->scan_in_progress()) {
 991     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 992     GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
 993 
 994     _parallel_marking_threads = calc_parallel_marking_threads();
 995     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
 996            "Maximum number of marking threads exceeded");
 997     uint active_workers = MAX2(1U, parallel_marking_threads());
 998 
 999     G1CMRootRegionScanTask task(this);
1000     _parallel_workers->set_active_workers(active_workers);
1001     _parallel_workers->run_task(&task);
1002 
1003     // It's possible that has_aborted() is true here without actually
1004     // aborting the survivor scan earlier. This is OK as it's
1005     // mainly used for sanity checking.
1006     root_regions()->scan_finished();
1007   }
1008 }
1009 
1010 void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
1011   uint old_val = 0;
1012   do {
1013     old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
1014   } while (old_val != ConcPhaseNotStarted);
1015   _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
1016 }
1017 
1018 void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
1019   if (_concurrent_phase_status == ConcPhaseNotStarted) {
1020     return;
1021   }
1022 
1023   uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
1024   if (old_val == ConcPhaseStarted) {
1025     _g1h->gc_timer_cm()->register_gc_concurrent_end();
 1027     // If 'end_timer' is true, we came here to end the timer, which requires that the
 1028     // concurrent phase has ended. We must end it before changing the status to
 1029     // 'ConcPhaseNotStarted', to keep the 'ConcurrentMarkThread' from starting a new concurrent phase.
1029     if (end_timer) {
1030       _g1h->gc_timer_cm()->register_gc_end();
1031     }
1032     old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
1033     assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
1034   } else {
1035     do {
 1036       // Let the other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
1037       os::naked_short_sleep(1);
1038     } while (_concurrent_phase_status != ConcPhaseNotStarted);
1039   }
1040 }
1041 
1042 void G1ConcurrentMark::register_concurrent_phase_end() {
1043   register_concurrent_phase_end_common(false);
1044 }
1045 
1046 void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
1047   register_concurrent_phase_end_common(true);
1048 }
1049 
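The phase-status handshake above is a small state machine driven by compare-and-swap: ConcPhaseNotStarted -> ConcPhaseStarted -> ConcPhaseStopping -> ConcPhaseNotStarted, with a racing thread spinning until the status returns to ConcPhaseNotStarted. Below is a minimal standalone sketch of the same pattern, assuming std::atomic in place of HotSpot's Atomic::cmpxchg and os::naked_short_sleep; the enum values and function names are illustrative, not the ones used in this file.

#include <atomic>
#include <thread>

enum PhaseStatus : unsigned { NotStarted, Started, Stopping };
static std::atomic<unsigned> phase_status{NotStarted};

void phase_start() {
  unsigned expected = NotStarted;
  // Only the thread that flips NotStarted -> Started proceeds; a loser resets
  // 'expected' and retries until a concurrent phase_end() restores NotStarted.
  while (!phase_status.compare_exchange_strong(expected, Started)) {
    expected = NotStarted;
    std::this_thread::yield();
  }
  // ... register the start of the concurrent phase ...
}

void phase_end() {
  if (phase_status.load() == NotStarted) {
    return;                                  // nothing to end
  }
  unsigned expected = Started;
  if (phase_status.compare_exchange_strong(expected, Stopping)) {
    // ... register the end of the concurrent phase / stop timers ...
    phase_status.store(NotStarted);          // phase_start() may now succeed again
  } else {
    // Another thread is already stopping the phase; wait for it to finish.
    while (phase_status.load() != NotStarted) {
      std::this_thread::yield();
    }
  }
}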
1050 void G1ConcurrentMark::markFromRoots() {
1051   // we might be tempted to assert that:
1052   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1053   //        "inconsistent argument?");
1054   // However that wouldn't be right, because it's possible that
1055   // a safepoint is indeed in progress as a younger generation
1056   // stop-the-world GC happens even as we mark in this generation.
1057 
1058   _restart_for_overflow = false;
1059 
1060   // _g1h has _n_par_threads
1061   _parallel_marking_threads = calc_parallel_marking_threads();
1062   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1063     "Maximum number of marking threads exceeded");
1064 
1065   uint active_workers = MAX2(1U, parallel_marking_threads());
1066   assert(active_workers > 0, "Should have been set");
1067 


1107 
1108   weakRefsWork(clear_all_soft_refs);
1109 
1110   if (has_overflown()) {
1111     // Oops.  We overflowed.  Restart concurrent marking.
1112     _restart_for_overflow = true;
1113     log_develop_trace(gc)("Remark led to restart for overflow.");
1114 
1115     // Verify the heap w.r.t. the previous marking bitmap.
1116     if (VerifyDuringGC) {
1117       HandleMark hm;  // handle scope
1118       g1h->prepare_for_verify();
1119       Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1120     }
1121 
1122     // Clear the marking state because we will be restarting
1123     // marking due to overflowing the global mark stack.
1124     reset_marking_state();
1125   } else {
1126     {
1127       GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());
1128 
1129       // Aggregate the per-task counting data that we have accumulated
1130       // while marking.
1131       aggregate_count_data();
1132     }
1133 
1134     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1135     // We're done with marking.
 1136     // This is the end of the marking cycle; we expect all
 1137     // threads to have SATB queues with active set to true.
1138     satb_mq_set.set_active_all_threads(false, /* new active value */
1139                                        true /* expected_active */);
1140 
1141     if (VerifyDuringGC) {
1142       HandleMark hm;  // handle scope
1143       g1h->prepare_for_verify();
1144       Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1145     }
1146     g1h->verifier()->check_bitmaps("Remark End");
1147     assert(!restart_for_overflow(), "sanity");
1148     // Completely reset the marking state since marking completed
1149     set_non_marking_state();
1150   }
1151 
1152   // Expand the marking stack, if we have to and if we can.
1153   if (_markStack.should_expand()) {
1154     _markStack.expand();
1155   }
1156 
1157   // Statistics
1158   double now = os::elapsedTime();
1159   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1160   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1161   _remark_times.add((now - start) * 1000.0);
1162 
1163   g1p->record_concurrent_mark_remark_end();
1164 
1165   G1CMIsAliveClosure is_alive(g1h);
1166   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1167 }
1168 
1169 // Base class of the closures that finalize and verify the
1170 // liveness counting data.
1171 class G1CMCountDataClosureBase: public HeapRegionClosure {
1172 protected:
1173   G1CollectedHeap* _g1h;
1174   G1ConcurrentMark* _cm;
1175   CardTableModRefBS* _ct_bs;
1176 
1177   BitMap* _region_bm;
1178   BitMap* _card_bm;
1179 
 1180   // Takes a region that's not empty (i.e., it has at least one
 1181   // live object in it) and sets its corresponding bit on the region
 1182   // bitmap to 1.
1183   void set_bit_for_region(HeapRegion* hr) {
1184     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1185     _region_bm->par_at_put(index, true);
1186   }
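set_bit_for_region() above depends on BitMap::par_at_put() so that concurrent worker threads can set region bits without losing each other's updates. A minimal sketch of what such a parallel bit set amounts to, assuming a plain array of 64-bit words rather than HotSpot's BitMap class (the helper name is hypothetical):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Atomically OR the bit into its containing word; two threads setting
// different bits of the same word cannot clobber one another, and setting
// the same bit twice is harmless (the operation is idempotent).
inline void par_set_bit(std::atomic<uint64_t>* words, size_t bit_index) {
  std::atomic<uint64_t>& word = words[bit_index / 64];
  const uint64_t mask = uint64_t(1) << (bit_index % 64);
  word.fetch_or(mask, std::memory_order_relaxed);
}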


1735   }
1736 
1737   g1h->verifier()->check_bitmaps("Cleanup End");
1738 
1739   g1h->verifier()->verify_region_sets_optional();
1740 
 1741   // We need to make this a "collection" so that any collection pause that
 1742   // races with it goes around and waits for completeCleanup to finish.
1743   g1h->increment_total_collections();
1744 
1745   // Clean out dead classes and update Metaspace sizes.
1746   if (ClassUnloadingWithConcurrentMark) {
1747     ClassLoaderDataGraph::purge();
1748   }
1749   MetaspaceGC::compute_new_size();
1750 
1751   // We reclaimed old regions so we should calculate the sizes to make
1752   // sure we update the old gen/space data.
1753   g1h->g1mm()->update_sizes();
1754   g1h->allocation_context_stats().update_after_mark();
1755 
1756   g1h->trace_heap_after_concurrent_cycle();
1757 }
1758 
1759 void G1ConcurrentMark::completeCleanup() {
1760   if (has_aborted()) return;
1761 
1762   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1763 
1764   _cleanup_list.verify_optional();
1765   FreeRegionList tmp_free_list("Tmp Free List");
1766 
1767   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1768                                   "cleanup list has %u entries",
1769                                   _cleanup_list.length());
1770 
1771   // No one else should be accessing the _cleanup_list at this point,
1772   // so it is not necessary to take any locks
1773   while (!_cleanup_list.is_empty()) {
1774     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1775     assert(hr != NULL, "Got NULL from a non-empty list");
1776     hr->par_clear();


2028     // Skip processing the discovered references if we have
2029     // overflown the global marking stack. Reference objects
2030     // only get discovered once so it is OK to not
2031     // de-populate the discovered reference lists. We could have,
2032     // but the only benefit would be that, when marking restarts,
 2033     // fewer reference objects are discovered.
2034     return;
2035   }
2036 
2037   ResourceMark rm;
2038   HandleMark   hm;
2039 
2040   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2041 
2042   // Is alive closure.
2043   G1CMIsAliveClosure g1_is_alive(g1h);
2044 
2045   // Inner scope to exclude the cleaning of the string and symbol
2046   // tables from the displayed time.
2047   {
2048     GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm());
2049 
2050     ReferenceProcessor* rp = g1h->ref_processor_cm();
2051 
2052     // See the comment in G1CollectedHeap::ref_processing_init()
2053     // about how reference processing currently works in G1.
2054 
2055     // Set the soft reference policy
2056     rp->setup_policy(clear_all_soft_refs);
2057     assert(_markStack.isEmpty(), "mark stack should be empty");
2058 
2059     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2060     // in serial reference processing. Note these closures are also
 2061     // used for serially processing (by the current thread) the
2062     // JNI references during parallel reference processing.
2063     //
2064     // These closures do not need to synchronize with the worker
2065     // threads involved in parallel reference processing as these
2066     // instances are executed serially by the current thread (e.g.
2067     // reference processing is not multi-threaded and is thus
2068     // performed by the current thread instead of a gang worker).


2085     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2086                                               g1h->workers(), active_workers);
2087     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2088 
2089     // Set the concurrency level. The phase was already set prior to
2090     // executing the remark task.
2091     set_concurrency(active_workers);
2092 
2093     // Set the degree of MT processing here.  If the discovery was done MT,
2094     // the number of threads involved during discovery could differ from
2095     // the number of active workers.  This is OK as long as the discovered
2096     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2097     rp->set_active_mt_degree(active_workers);
2098 
2099     // Process the weak references.
2100     const ReferenceProcessorStats& stats =
2101         rp->process_discovered_references(&g1_is_alive,
2102                                           &g1_keep_alive,
2103                                           &g1_drain_mark_stack,
2104                                           executor,
2105                                           g1h->gc_timer_cm());
2106     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2107 
2108     // The do_oop work routines of the keep_alive and drain_marking_stack
2109     // oop closures will set the has_overflown flag if we overflow the
2110     // global marking stack.
2111 
2112     assert(_markStack.overflow() || _markStack.isEmpty(),
2113             "mark stack should be empty (unless it overflowed)");
2114 
2115     if (_markStack.overflow()) {
2116       // This should have been done already when we tried to push an
2117       // entry on to the global mark stack. But let's do it again.
2118       set_has_overflown();
2119     }
2120 
2121     assert(rp->num_q() == active_workers, "why not");
2122 
2123     rp->enqueue_discovered_references(executor);
2124 
2125     rp->verify_no_references_recorded();
2126     assert(!rp->discovery_enabled(), "Post condition");
2127   }
2128 
2129   if (has_overflown()) {
 2130     // We cannot trust g1_is_alive if the marking stack overflowed
2131     return;
2132   }
2133 
2134   assert(_markStack.isEmpty(), "Marking should have completed");
2135 
2136   // Unload Klasses, String, Symbols, Code Cache, etc.
2137   {
2138     GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());
2139 
2140     if (ClassUnloadingWithConcurrentMark) {
2141       bool purged_classes;
2142 
2143       {
2144         GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
2145         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2146       }
2147 
2148       {
2149         GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
2150         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2151       }
2152     }
2153 
2154     if (G1StringDedup::is_enabled()) {
2155       GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
2156       G1StringDedup::unlink(&g1_is_alive);
2157     }
2158   }
2159 }
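The comments in the reference-processing block above describe the global mark stack's overflow protocol: a push that does not fit latches has_overflown, and the caller later abandons remark and restarts concurrent marking instead of silently dropping entries. A simplified, single-threaded sketch of that "latch overflow on failed push" idea; the class and its members are illustrative, while the real stack is shared between workers and can also be expanded.

#include <cstddef>

class ToyMarkStack {
  void** _base;       // backing storage supplied by the caller
  size_t _capacity;   // fixed number of entries
  size_t _index;      // next free slot
  bool   _overflow;   // latched when a push is rejected
public:
  ToyMarkStack(void** base, size_t capacity)
    : _base(base), _capacity(capacity), _index(0), _overflow(false) {}

  bool push(void* entry) {
    if (_index == _capacity) {
      _overflow = true;            // remember that work was lost; marking must restart
      return false;
    }
    _base[_index++] = entry;
    return true;
  }

  bool overflow() const { return _overflow; }
  bool is_empty() const { return _index == 0; }
};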
2160 
2161 void G1ConcurrentMark::swapMarkBitMaps() {
2162   G1CMBitMapRO* temp = _prevMarkBitMap;
2163   _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
2164   _nextMarkBitMap    = (G1CMBitMap*)  temp;
2165 }
2166 
2167 // Closure for marking entries in SATB buffers.
2168 class G1CMSATBBufferClosure : public SATBBufferClosure {
2169 private:
2170   G1CMTask* _task;
2171   G1CollectedHeap* _g1h;
2172 
2173   // This is very similar to G1CMTask::deal_with_reference, but with
2174   // more relaxed requirements for the argument, so this must be more
2175   // circumspect about treating the argument as an object.


2256                               true         /* do_termination       */,
2257                               false        /* is_serial            */);
2258       } while (task->has_aborted() && !_cm->has_overflown());
2259       // If we overflow, then we do not want to restart. We instead
2260       // want to abort remark and do concurrent marking again.
2261       task->record_end_time();
2262     }
2263   }
2264 
2265   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
2266     AbstractGangTask("Par Remark"), _cm(cm) {
2267     _cm->terminator()->reset_for_reuse(active_workers);
2268   }
2269 };
2270 
2271 void G1ConcurrentMark::checkpointRootsFinalWork() {
2272   ResourceMark rm;
2273   HandleMark   hm;
2274   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2275 
2276   GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
2277 
2278   g1h->ensure_parsability(false);
2279 
2280   // this is remark, so we'll use up all active threads
2281   uint active_workers = g1h->workers()->active_workers();
2282   set_concurrency_and_phase(active_workers, false /* concurrent */);
 2283   // Leave _parallel_marking_threads at its
2284   // value originally calculated in the G1ConcurrentMark
2285   // constructor and pass values of the active workers
2286   // through the gang in the task.
2287 
2288   {
2289     StrongRootsScope srs(active_workers);
2290 
2291     G1CMRemarkTask remarkTask(this, active_workers);
2292     // We will start all available threads, even if we decide that the
2293     // active_workers will be fewer. The extra ones will just bail out
2294     // immediately.
2295     g1h->workers()->run_task(&remarkTask);
2296   }


2612   // a full GC against the previous bitmap.
2613 
2614   // Clear the liveness counting data
2615   clear_all_count_data();
2616   // Empty mark stack
2617   reset_marking_state();
2618   for (uint i = 0; i < _max_worker_id; ++i) {
2619     _tasks[i]->clear_region_fields();
2620   }
2621   _first_overflow_barrier_sync.abort();
2622   _second_overflow_barrier_sync.abort();
2623   _has_aborted = true;
2624 
2625   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2626   satb_mq_set.abandon_partial_marking();
 2627   // This can be called either during or outside marking; we'll read
 2628   // the expected_active value from the SATB queue set.
2629   satb_mq_set.set_active_all_threads(
2630                                  false, /* new active value */
2631                                  satb_mq_set.is_active() /* expected_active */);
2632 
2633   _g1h->trace_heap_after_concurrent_cycle();
2634 
2635   _g1h->register_concurrent_cycle_end();
2636 }
2637 
2638 static void print_ms_time_info(const char* prefix, const char* name,
2639                                NumberSeq& ns) {
2640   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2641                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2642   if (ns.num() > 0) {
2643     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2644                            prefix, ns.sd(), ns.maximum());
2645   }
2646 }
2647 
2648 void G1ConcurrentMark::print_summary_info() {
2649   LogHandle(gc, marking) log;
2650   if (!log.is_trace()) {
2651     return;
2652   }
2653 
2654   log.trace(" Concurrent marking:");
2655   print_ms_time_info("  ", "init marks", _init_times);




 424             CardTableModRefBS::card_shift,
 425             false /* in_resource_area*/),
 426 
 427   _prevMarkBitMap(&_markBitMap1),
 428   _nextMarkBitMap(&_markBitMap2),
 429 
 430   _markStack(this),
 431   // _finger set in set_non_marking_state
 432 
 433   _max_worker_id(ParallelGCThreads),
 434   // _active_tasks set in set_non_marking_state
 435   // _tasks set inside the constructor
 436   _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
 437   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 438 
 439   _has_overflown(false),
 440   _concurrent(false),
 441   _has_aborted(false),
 442   _restart_for_overflow(false),
 443   _concurrent_marking_in_progress(false),
 444   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 445   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 446 
 447   // _verbose_level set below
 448 
 449   _init_times(),
 450   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 451   _cleanup_times(),
 452   _total_counting_time(0.0),
 453   _total_rs_scrub_time(0.0),
 454 
 455   _parallel_workers(NULL),
 456 
 457   _count_card_bitmaps(NULL),
 458   _count_marked_bytes(NULL),
 459   _completed_initialization(false) {
 460 
 461   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 462   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 463 
 464   // Create & start a ConcurrentMark thread.
 465   _cmThread = new ConcurrentMarkThread(this);


 991   if (root_regions()->scan_in_progress()) {
 992     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 993     GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
 994 
 995     _parallel_marking_threads = calc_parallel_marking_threads();
 996     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
 997            "Maximum number of marking threads exceeded");
 998     uint active_workers = MAX2(1U, parallel_marking_threads());
 999 
1000     G1CMRootRegionScanTask task(this);
1001     _parallel_workers->set_active_workers(active_workers);
1002     _parallel_workers->run_task(&task);
1003 
1004     // It's possible that has_aborted() is true here without actually
1005     // aborting the survivor scan earlier. This is OK as it's
1006     // mainly used for sanity checking.
1007     root_regions()->scan_finished();
1008   }
1009 }
1010 
1011 void G1ConcurrentMark::concurrent_cycle_start() {
1012   _gc_timer_cm->register_gc_start();
1013 
1014   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
1015 
1016   _g1h->trace_heap_before_gc(_gc_tracer_cm);
1017 }
1018 
1019 void G1ConcurrentMark::concurrent_cycle_end() {
1020   _g1h->trace_heap_after_gc(_gc_tracer_cm);
1021 
1022   if (has_aborted()) {
1023     _gc_tracer_cm->report_concurrent_mode_failure();
1024   }
1025 
1026   _gc_timer_cm->register_gc_end();
1027 
1028   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1029 }
1030 
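concurrent_cycle_start() and concurrent_cycle_end(), new in this revision, bracket a whole concurrent cycle with the timer and tracer that G1ConcurrentMark now owns. The sketch below only illustrates the intended call shape; the stand-in type and wrapper function are hypothetical, and in the VM it is the concurrent marking thread that drives this sequence.

// Stand-in with the same two entry points, to show the bracketing order only.
struct CycleOwner {
  void concurrent_cycle_start() { /* register_gc_start(), report_gc_start(), trace heap "before" */ }
  void concurrent_cycle_end()   { /* trace heap "after"; report a concurrent mode failure if the
                                     cycle aborted; register_gc_end(), report_gc_end() */ }
};

void example_concurrent_cycle(CycleOwner& cm) {
  cm.concurrent_cycle_start();   // timer and tracer started
  // ... root region scan, markFromRoots(), remark and cleanup pauses ...
  cm.concurrent_cycle_end();     // timer and tracer closed, even if the cycle aborted
}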
1031 void G1ConcurrentMark::markFromRoots() {
1032   // we might be tempted to assert that:
1033   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1034   //        "inconsistent argument?");
1035   // However that wouldn't be right, because it's possible that
1036   // a safepoint is indeed in progress as a younger generation
1037   // stop-the-world GC happens even as we mark in this generation.
1038 
1039   _restart_for_overflow = false;
1040 
1041   // _g1h has _n_par_threads
1042   _parallel_marking_threads = calc_parallel_marking_threads();
1043   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1044     "Maximum number of marking threads exceeded");
1045 
1046   uint active_workers = MAX2(1U, parallel_marking_threads());
1047   assert(active_workers > 0, "Should have been set");
1048 


1088 
1089   weakRefsWork(clear_all_soft_refs);
1090 
1091   if (has_overflown()) {
1092     // Oops.  We overflowed.  Restart concurrent marking.
1093     _restart_for_overflow = true;
1094     log_develop_trace(gc)("Remark led to restart for overflow.");
1095 
1096     // Verify the heap w.r.t. the previous marking bitmap.
1097     if (VerifyDuringGC) {
1098       HandleMark hm;  // handle scope
1099       g1h->prepare_for_verify();
1100       Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1101     }
1102 
1103     // Clear the marking state because we will be restarting
1104     // marking due to overflowing the global mark stack.
1105     reset_marking_state();
1106   } else {
1107     {
1108       GCTraceTime(Debug, gc) trace("Aggregate Data", _gc_timer_cm);
1109 
1110       // Aggregate the per-task counting data that we have accumulated
1111       // while marking.
1112       aggregate_count_data();
1113     }
1114 
1115     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1116     // We're done with marking.
1117     // This is the end of the marking cycle; we expect all
1118     // threads to have SATB queues with active set to true.
1119     satb_mq_set.set_active_all_threads(false, /* new active value */
1120                                        true /* expected_active */);
1121 
1122     if (VerifyDuringGC) {
1123       HandleMark hm;  // handle scope
1124       g1h->prepare_for_verify();
1125       Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1126     }
1127     g1h->verifier()->check_bitmaps("Remark End");
1128     assert(!restart_for_overflow(), "sanity");
1129     // Completely reset the marking state since marking completed
1130     set_non_marking_state();
1131   }
1132 
1133   // Expand the marking stack, if we have to and if we can.
1134   if (_markStack.should_expand()) {
1135     _markStack.expand();
1136   }
1137 
1138   // Statistics
1139   double now = os::elapsedTime();
1140   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1141   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1142   _remark_times.add((now - start) * 1000.0);
1143 
1144   g1p->record_concurrent_mark_remark_end();
1145 
1146   G1CMIsAliveClosure is_alive(g1h);
1147   _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1148 }
1149 
1150 // Base class of the closures that finalize and verify the
1151 // liveness counting data.
1152 class G1CMCountDataClosureBase: public HeapRegionClosure {
1153 protected:
1154   G1CollectedHeap* _g1h;
1155   G1ConcurrentMark* _cm;
1156   CardTableModRefBS* _ct_bs;
1157 
1158   BitMap* _region_bm;
1159   BitMap* _card_bm;
1160 
1161   // Takes a region that's not empty (i.e., it has at least one
1162   // live object in it) and sets its corresponding bit on the region
1163   // bitmap to 1.
1164   void set_bit_for_region(HeapRegion* hr) {
1165     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1166     _region_bm->par_at_put(index, true);
1167   }


1716   }
1717 
1718   g1h->verifier()->check_bitmaps("Cleanup End");
1719 
1720   g1h->verifier()->verify_region_sets_optional();
1721 
1722   // We need to make this a "collection" so that any collection pause that
1723   // races with it goes around and waits for completeCleanup to finish.
1724   g1h->increment_total_collections();
1725 
1726   // Clean out dead classes and update Metaspace sizes.
1727   if (ClassUnloadingWithConcurrentMark) {
1728     ClassLoaderDataGraph::purge();
1729   }
1730   MetaspaceGC::compute_new_size();
1731 
1732   // We reclaimed old regions so we should calculate the sizes to make
1733   // sure we update the old gen/space data.
1734   g1h->g1mm()->update_sizes();
1735   g1h->allocation_context_stats().update_after_mark();
1736 }
1737 
1738 void G1ConcurrentMark::completeCleanup() {
1739   if (has_aborted()) return;
1740 
1741   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1742 
1743   _cleanup_list.verify_optional();
1744   FreeRegionList tmp_free_list("Tmp Free List");
1745 
1746   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1747                                   "cleanup list has %u entries",
1748                                   _cleanup_list.length());
1749 
1750   // No one else should be accessing the _cleanup_list at this point,
1751   // so it is not necessary to take any locks
1752   while (!_cleanup_list.is_empty()) {
1753     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1754     assert(hr != NULL, "Got NULL from a non-empty list");
1755     hr->par_clear();


2007     // Skip processing the discovered references if we have
2008     // overflown the global marking stack. Reference objects
2009     // only get discovered once so it is OK to not
2010     // de-populate the discovered reference lists. We could have,
2011     // but the only benefit would be that, when marking restarts,
2012     // fewer reference objects are discovered.
2013     return;
2014   }
2015 
2016   ResourceMark rm;
2017   HandleMark   hm;
2018 
2019   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2020 
2021   // Is alive closure.
2022   G1CMIsAliveClosure g1_is_alive(g1h);
2023 
2024   // Inner scope to exclude the cleaning of the string and symbol
2025   // tables from the displayed time.
2026   {
2027     GCTraceTime(Debug, gc) trace("Reference Processing", _gc_timer_cm);
2028 
2029     ReferenceProcessor* rp = g1h->ref_processor_cm();
2030 
2031     // See the comment in G1CollectedHeap::ref_processing_init()
2032     // about how reference processing currently works in G1.
2033 
2034     // Set the soft reference policy
2035     rp->setup_policy(clear_all_soft_refs);
2036     assert(_markStack.isEmpty(), "mark stack should be empty");
2037 
2038     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2039     // in serial reference processing. Note these closures are also
2040     // used for serially processing (by the current thread) the
2041     // JNI references during parallel reference processing.
2042     //
2043     // These closures do not need to synchronize with the worker
2044     // threads involved in parallel reference processing as these
2045     // instances are executed serially by the current thread (e.g.
2046     // reference processing is not multi-threaded and is thus
2047     // performed by the current thread instead of a gang worker).


2064     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2065                                               g1h->workers(), active_workers);
2066     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2067 
2068     // Set the concurrency level. The phase was already set prior to
2069     // executing the remark task.
2070     set_concurrency(active_workers);
2071 
2072     // Set the degree of MT processing here.  If the discovery was done MT,
2073     // the number of threads involved during discovery could differ from
2074     // the number of active workers.  This is OK as long as the discovered
2075     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2076     rp->set_active_mt_degree(active_workers);
2077 
2078     // Process the weak references.
2079     const ReferenceProcessorStats& stats =
2080         rp->process_discovered_references(&g1_is_alive,
2081                                           &g1_keep_alive,
2082                                           &g1_drain_mark_stack,
2083                                           executor,
2084                                           _gc_timer_cm);
2085     _gc_tracer_cm->report_gc_reference_stats(stats);
2086 
2087     // The do_oop work routines of the keep_alive and drain_marking_stack
2088     // oop closures will set the has_overflown flag if we overflow the
2089     // global marking stack.
2090 
2091     assert(_markStack.overflow() || _markStack.isEmpty(),
2092             "mark stack should be empty (unless it overflowed)");
2093 
2094     if (_markStack.overflow()) {
2095       // This should have been done already when we tried to push an
2096       // entry on to the global mark stack. But let's do it again.
2097       set_has_overflown();
2098     }
2099 
2100     assert(rp->num_q() == active_workers, "why not");
2101 
2102     rp->enqueue_discovered_references(executor);
2103 
2104     rp->verify_no_references_recorded();
2105     assert(!rp->discovery_enabled(), "Post condition");
2106   }
2107 
2108   if (has_overflown()) {
2109     // We cannot trust g1_is_alive if the marking stack overflowed
2110     return;
2111   }
2112 
2113   assert(_markStack.isEmpty(), "Marking should have completed");
2114 
2115   // Unload Klasses, String, Symbols, Code Cache, etc.
2116   {
2117     GCTraceTime(Debug, gc) trace("Unloading", _gc_timer_cm);
2118 
2119     if (ClassUnloadingWithConcurrentMark) {
2120       bool purged_classes;
2121 
2122       {
2123         GCTraceTime(Trace, gc) trace("System Dictionary Unloading", _gc_timer_cm);
2124         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2125       }
2126 
2127       {
2128         GCTraceTime(Trace, gc) trace("Parallel Unloading", _gc_timer_cm);
2129         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2130       }
2131     }
2132 
2133     if (G1StringDedup::is_enabled()) {
2134       GCTraceTime(Trace, gc) trace("String Deduplication Unlink", _gc_timer_cm);
2135       G1StringDedup::unlink(&g1_is_alive);
2136     }
2137   }
2138 }
2139 
2140 void G1ConcurrentMark::swapMarkBitMaps() {
2141   G1CMBitMapRO* temp = _prevMarkBitMap;
2142   _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
2143   _nextMarkBitMap    = (G1CMBitMap*)  temp;
2144 }
2145 
2146 // Closure for marking entries in SATB buffers.
2147 class G1CMSATBBufferClosure : public SATBBufferClosure {
2148 private:
2149   G1CMTask* _task;
2150   G1CollectedHeap* _g1h;
2151 
2152   // This is very similar to G1CMTask::deal_with_reference, but with
2153   // more relaxed requirements for the argument, so this must be more
2154   // circumspect about treating the argument as an object.


2235                               true         /* do_termination       */,
2236                               false        /* is_serial            */);
2237       } while (task->has_aborted() && !_cm->has_overflown());
2238       // If we overflow, then we do not want to restart. We instead
2239       // want to abort remark and do concurrent marking again.
2240       task->record_end_time();
2241     }
2242   }
2243 
2244   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
2245     AbstractGangTask("Par Remark"), _cm(cm) {
2246     _cm->terminator()->reset_for_reuse(active_workers);
2247   }
2248 };
2249 
2250 void G1ConcurrentMark::checkpointRootsFinalWork() {
2251   ResourceMark rm;
2252   HandleMark   hm;
2253   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2254 
2255   GCTraceTime(Debug, gc) trace("Finalize Marking", _gc_timer_cm);
2256 
2257   g1h->ensure_parsability(false);
2258 
2259   // this is remark, so we'll use up all active threads
2260   uint active_workers = g1h->workers()->active_workers();
2261   set_concurrency_and_phase(active_workers, false /* concurrent */);
2262   // Leave _parallel_marking_threads at its
2263   // value originally calculated in the G1ConcurrentMark
2264   // constructor and pass values of the active workers
2265   // through the gang in the task.
2266 
2267   {
2268     StrongRootsScope srs(active_workers);
2269 
2270     G1CMRemarkTask remarkTask(this, active_workers);
2271     // We will start all available threads, even if we decide that the
2272     // active_workers will be fewer. The extra ones will just bail out
2273     // immediately.
2274     g1h->workers()->run_task(&remarkTask);
2275   }


2591   // a full GC against the previous bitmap.
2592 
2593   // Clear the liveness counting data
2594   clear_all_count_data();
2595   // Empty mark stack
2596   reset_marking_state();
2597   for (uint i = 0; i < _max_worker_id; ++i) {
2598     _tasks[i]->clear_region_fields();
2599   }
2600   _first_overflow_barrier_sync.abort();
2601   _second_overflow_barrier_sync.abort();
2602   _has_aborted = true;
2603 
2604   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2605   satb_mq_set.abandon_partial_marking();
2606   // This can be called either during or outside marking; we'll read
2607   // the expected_active value from the SATB queue set.
2608   satb_mq_set.set_active_all_threads(
2609                                  false, /* new active value */
2610                                  satb_mq_set.is_active() /* expected_active */);
2611 }
2612 
2613 static void print_ms_time_info(const char* prefix, const char* name,
2614                                NumberSeq& ns) {
2615   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2616                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2617   if (ns.num() > 0) {
2618     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2619                            prefix, ns.sd(), ns.maximum());
2620   }
2621 }
2622 
2623 void G1ConcurrentMark::print_summary_info() {
2624   LogHandle(gc, marking) log;
2625   if (!log.is_trace()) {
2626     return;
2627   }
2628 
2629   log.trace(" Concurrent marking:");
2630   print_ms_time_info("  ", "init marks", _init_times);

