src/share/vm/gc_implementation/g1/concurrentMark.cpp

Print this page
rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>


  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
  28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  32 #include "gc_implementation/g1/g1Log.hpp"
  33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  34 #include "gc_implementation/g1/g1RemSet.hpp"
  35 #include "gc_implementation/g1/heapRegion.inline.hpp"
  36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  38 #include "gc_implementation/shared/vmGCOperations.hpp"



  39 #include "memory/genOopClosures.inline.hpp"
  40 #include "memory/referencePolicy.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "runtime/handles.inline.hpp"
  44 #include "runtime/java.hpp"
  45 #include "services/memTracker.hpp"
  46 
  47 // Concurrent marking bit map wrapper
  48 
// Constructs a read-only concurrent-marking bitmap wrapper. The covered
// heap range is initially empty (_bmStartWord/_bmWordSize are zeroed); the
// actual range and backing storage are established later, elsewhere in the
// class (not visible in this chunk).
// _shifter presumably scales HeapWord offsets to bit indices in _bm —
// confirm against the bitmap accessors in the header.
CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;  // base address of the covered range (unset until initialized)
  _bmWordSize = 0;   // size of the covered range, in HeapWords
}
  55 
  56 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
  57                                                HeapWord* limit) const {
  58   // First we must round addr *up* to a possible object boundary.


1325       Universe::verify(VerifyOption_G1UseNextMarking,
1326                        " VerifyDuringGC:(after)");
1327     }
1328     assert(!restart_for_overflow(), "sanity");
1329     // Completely reset the marking state since marking completed
1330     set_non_marking_state();
1331   }
1332 
1333   // Expand the marking stack, if we have to and if we can.
1334   if (_markStack.should_expand()) {
1335     _markStack.expand();
1336   }
1337 
1338   // Statistics
1339   double now = os::elapsedTime();
1340   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1341   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1342   _remark_times.add((now - start) * 1000.0);
1343 
1344   g1p->record_concurrent_mark_remark_end();



1345 }
1346 
1347 // Base class of the closures that finalize and verify the
1348 // liveness counting data.
1349 class CMCountDataClosureBase: public HeapRegionClosure {
1350 protected:
1351   G1CollectedHeap* _g1h;
1352   ConcurrentMark* _cm;
1353   CardTableModRefBS* _ct_bs;
1354 
1355   BitMap* _region_bm;
1356   BitMap* _card_bm;
1357 
1358   // Takes a region that's not empty (i.e., it has at least one
1359   // live object in it and sets its corresponding bit on the region
1360   // bitmap to 1. If the region is "starts humongous" it will also set
1361   // to 1 the bits on the region bitmap that correspond to its
1362   // associated "continues humongous" regions.
1363   void set_bit_for_region(HeapRegion* hr) {
1364     assert(!hr->continuesHumongous(), "should have filtered those out");


2112   // Clean up will have freed any regions completely full of garbage.
2113   // Update the soft reference policy with the new heap occupancy.
2114   Universe::update_heap_info_at_gc();
2115 
2116   // We need to make this be a "collection" so any collection pause that
2117   // races with it goes around and waits for completeCleanup to finish.
2118   g1h->increment_total_collections();
2119 
2120   // We reclaimed old regions so we should calculate the sizes to make
2121   // sure we update the old gen/space data.
2122   g1h->g1mm()->update_sizes();
2123 
2124   if (VerifyDuringGC) {
2125     HandleMark hm;  // handle scope
2126     Universe::heap()->prepare_for_verify();
2127     Universe::verify(VerifyOption_G1UsePrevMarking,
2128                      " VerifyDuringGC:(after)");
2129   }
2130 
2131   g1h->verify_region_sets_optional();

2132 }
2133 
2134 void ConcurrentMark::completeCleanup() {
2135   if (has_aborted()) return;
2136 
2137   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2138 
2139   _cleanup_list.verify_optional();
2140   FreeRegionList tmp_free_list("Tmp Free List");
2141 
2142   if (G1ConcRegionFreeingVerbose) {
2143     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2144                            "cleanup list has %u entries",
2145                            _cleanup_list.length());
2146   }
2147 
2148   // Noone else should be accessing the _cleanup_list at this point,
2149   // so it's not necessary to take any locks
2150   while (!_cleanup_list.is_empty()) {
2151     HeapRegion* hr = _cleanup_list.remove_head();


2422     // de-populate the discovered reference lists. We could have,
2423     // but the only benefit would be that, when marking restarts,
2424     // less reference objects are discovered.
2425     return;
2426   }
2427 
2428   ResourceMark rm;
2429   HandleMark   hm;
2430 
2431   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2432 
2433   // Is alive closure.
2434   G1CMIsAliveClosure g1_is_alive(g1h);
2435 
2436   // Inner scope to exclude the cleaning of the string and symbol
2437   // tables from the displayed time.
2438   {
2439     if (G1Log::finer()) {
2440       gclog_or_tty->put(' ');
2441     }
2442     TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
2443 
2444     ReferenceProcessor* rp = g1h->ref_processor_cm();
2445 
2446     // See the comment in G1CollectedHeap::ref_processing_init()
2447     // about how reference processing currently works in G1.
2448 
2449     // Set the soft reference policy
2450     rp->setup_policy(clear_all_soft_refs);
2451     assert(_markStack.isEmpty(), "mark stack should be empty");
2452 
2453     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2454     // in serial reference processing. Note these closures are also
2455     // used for serially processing (by the the current thread) the
2456     // JNI references during parallel reference processing.
2457     //
2458     // These closures do not need to synchronize with the worker
2459     // threads involved in parallel reference processing as these
2460     // instances are executed serially by the current thread (e.g.
2461     // reference processing is not multi-threaded and is thus
2462     // performed by the current thread instead of a gang worker).


2474     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2475     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2476     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2477 
2478     // Parallel processing task executor.
2479     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2480                                               g1h->workers(), active_workers);
2481     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2482 
2483     // Set the concurrency level. The phase was already set prior to
2484     // executing the remark task.
2485     set_concurrency(active_workers);
2486 
2487     // Set the degree of MT processing here.  If the discovery was done MT,
2488     // the number of threads involved during discovery could differ from
2489     // the number of active workers.  This is OK as long as the discovered
2490     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2491     rp->set_active_mt_degree(active_workers);
2492 
2493     // Process the weak references.

2494     rp->process_discovered_references(&g1_is_alive,
2495                                       &g1_keep_alive,
2496                                       &g1_drain_mark_stack,
2497                                       executor);


2498 
2499     // The do_oop work routines of the keep_alive and drain_marking_stack
2500     // oop closures will set the has_overflown flag if we overflow the
2501     // global marking stack.
2502 
2503     assert(_markStack.overflow() || _markStack.isEmpty(),
2504             "mark stack should be empty (unless it overflowed)");
2505 
2506     if (_markStack.overflow()) {
2507       // This should have been done already when we tried to push an
2508       // entry on to the global mark stack. But let's do it again.
2509       set_has_overflown();
2510     }
2511 
2512     assert(rp->num_q() == active_workers, "why not");
2513 
2514     rp->enqueue_discovered_references(executor);
2515 
2516     rp->verify_no_references_recorded();
2517     assert(!rp->discovery_enabled(), "Post condition");


// Abandon the current marking iteration due to a Full GC. Tears down all
// in-progress concurrent-marking state so that the marking threads, when
// they next run, find nothing to do. The order of the resets below is
// significant; do not reorder.
void ConcurrentMark::abort() {
  // Clear all marks to force marking thread to do nothing
  _nextMarkBitMap->clearAll();
  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  // Reset each worker task's per-region state.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _has_aborted = true;

  // Discard any partially-filled SATB buffers and deactivate SATB queues
  // across all Java threads.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);
}
3231 
3232 static void print_ms_time_info(const char* prefix, const char* name,
3233                                NumberSeq& ns) {
3234   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3235                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3236   if (ns.num() > 0) {
3237     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3238                            prefix, ns.sd(), ns.maximum());
3239   }
3240 }
3241 
3242 void ConcurrentMark::print_summary_info() {
3243   gclog_or_tty->print_cr(" Concurrent marking:");
3244   print_ms_time_info("  ", "init marks", _init_times);
3245   print_ms_time_info("  ", "remarks", _remark_times);
3246   {
3247     print_ms_time_info("     ", "final marks", _remark_mark_times);
3248     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3249 




  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
  28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  32 #include "gc_implementation/g1/g1Log.hpp"
  33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  34 #include "gc_implementation/g1/g1RemSet.hpp"
  35 #include "gc_implementation/g1/heapRegion.inline.hpp"
  36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  38 #include "gc_implementation/shared/vmGCOperations.hpp"
  39 #include "gc_implementation/shared/gcTimer.hpp"
  40 #include "gc_implementation/shared/gcTrace.hpp"
  41 #include "gc_implementation/shared/gcTraceTime.hpp"
  42 #include "memory/genOopClosures.inline.hpp"
  43 #include "memory/referencePolicy.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "runtime/handles.inline.hpp"
  47 #include "runtime/java.hpp"
  48 #include "services/memTracker.hpp"
  49 
  50 // Concurrent marking bit map wrapper
  51 
// Constructs a read-only concurrent-marking bitmap wrapper. The covered
// heap range is initially empty (_bmStartWord/_bmWordSize are zeroed); the
// actual range and backing storage are established later, elsewhere in the
// class (not visible in this chunk).
// _shifter presumably scales HeapWord offsets to bit indices in _bm —
// confirm against the bitmap accessors in the header.
CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;  // base address of the covered range (unset until initialized)
  _bmWordSize = 0;   // size of the covered range, in HeapWords
}
  58 
  59 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
  60                                                HeapWord* limit) const {
  61   // First we must round addr *up* to a possible object boundary.


1328       Universe::verify(VerifyOption_G1UseNextMarking,
1329                        " VerifyDuringGC:(after)");
1330     }
1331     assert(!restart_for_overflow(), "sanity");
1332     // Completely reset the marking state since marking completed
1333     set_non_marking_state();
1334   }
1335 
1336   // Expand the marking stack, if we have to and if we can.
1337   if (_markStack.should_expand()) {
1338     _markStack.expand();
1339   }
1340 
1341   // Statistics
1342   double now = os::elapsedTime();
1343   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1344   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1345   _remark_times.add((now - start) * 1000.0);
1346 
1347   g1p->record_concurrent_mark_remark_end();
1348 
1349   G1CMIsAliveClosure is_alive(g1h);
1350   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1351 }
1352 
1353 // Base class of the closures that finalize and verify the
1354 // liveness counting data.
1355 class CMCountDataClosureBase: public HeapRegionClosure {
1356 protected:
1357   G1CollectedHeap* _g1h;
1358   ConcurrentMark* _cm;
1359   CardTableModRefBS* _ct_bs;
1360 
1361   BitMap* _region_bm;
1362   BitMap* _card_bm;
1363 
1364   // Takes a region that's not empty (i.e., it has at least one
1365   // live object in it and sets its corresponding bit on the region
1366   // bitmap to 1. If the region is "starts humongous" it will also set
1367   // to 1 the bits on the region bitmap that correspond to its
1368   // associated "continues humongous" regions.
1369   void set_bit_for_region(HeapRegion* hr) {
1370     assert(!hr->continuesHumongous(), "should have filtered those out");


2118   // Clean up will have freed any regions completely full of garbage.
2119   // Update the soft reference policy with the new heap occupancy.
2120   Universe::update_heap_info_at_gc();
2121 
2122   // We need to make this be a "collection" so any collection pause that
2123   // races with it goes around and waits for completeCleanup to finish.
2124   g1h->increment_total_collections();
2125 
2126   // We reclaimed old regions so we should calculate the sizes to make
2127   // sure we update the old gen/space data.
2128   g1h->g1mm()->update_sizes();
2129 
2130   if (VerifyDuringGC) {
2131     HandleMark hm;  // handle scope
2132     Universe::heap()->prepare_for_verify();
2133     Universe::verify(VerifyOption_G1UsePrevMarking,
2134                      " VerifyDuringGC:(after)");
2135   }
2136 
2137   g1h->verify_region_sets_optional();
2138   g1h->trace_heap_after_concurrent_cycle();
2139 }
2140 
2141 void ConcurrentMark::completeCleanup() {
2142   if (has_aborted()) return;
2143 
2144   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2145 
2146   _cleanup_list.verify_optional();
2147   FreeRegionList tmp_free_list("Tmp Free List");
2148 
2149   if (G1ConcRegionFreeingVerbose) {
2150     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2151                            "cleanup list has %u entries",
2152                            _cleanup_list.length());
2153   }
2154 
2155   // Noone else should be accessing the _cleanup_list at this point,
2156   // so it's not necessary to take any locks
2157   while (!_cleanup_list.is_empty()) {
2158     HeapRegion* hr = _cleanup_list.remove_head();


2429     // de-populate the discovered reference lists. We could have,
2430     // but the only benefit would be that, when marking restarts,
2431     // less reference objects are discovered.
2432     return;
2433   }
2434 
2435   ResourceMark rm;
2436   HandleMark   hm;
2437 
2438   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2439 
2440   // Is alive closure.
2441   G1CMIsAliveClosure g1_is_alive(g1h);
2442 
2443   // Inner scope to exclude the cleaning of the string and symbol
2444   // tables from the displayed time.
2445   {
2446     if (G1Log::finer()) {
2447       gclog_or_tty->put(' ');
2448     }
2449     GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
2450 
2451     ReferenceProcessor* rp = g1h->ref_processor_cm();
2452 
2453     // See the comment in G1CollectedHeap::ref_processing_init()
2454     // about how reference processing currently works in G1.
2455 
2456     // Set the soft reference policy
2457     rp->setup_policy(clear_all_soft_refs);
2458     assert(_markStack.isEmpty(), "mark stack should be empty");
2459 
2460     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2461     // in serial reference processing. Note these closures are also
2462     // used for serially processing (by the the current thread) the
2463     // JNI references during parallel reference processing.
2464     //
2465     // These closures do not need to synchronize with the worker
2466     // threads involved in parallel reference processing as these
2467     // instances are executed serially by the current thread (e.g.
2468     // reference processing is not multi-threaded and is thus
2469     // performed by the current thread instead of a gang worker).


2481     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2482     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2483     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2484 
2485     // Parallel processing task executor.
2486     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2487                                               g1h->workers(), active_workers);
2488     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2489 
2490     // Set the concurrency level. The phase was already set prior to
2491     // executing the remark task.
2492     set_concurrency(active_workers);
2493 
2494     // Set the degree of MT processing here.  If the discovery was done MT,
2495     // the number of threads involved during discovery could differ from
2496     // the number of active workers.  This is OK as long as the discovered
2497     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2498     rp->set_active_mt_degree(active_workers);
2499 
2500     // Process the weak references.
2501     const ReferenceProcessorStats& stats =
2502         rp->process_discovered_references(&g1_is_alive,
2503                                           &g1_keep_alive,
2504                                           &g1_drain_mark_stack,
2505                                           executor,
2506                                           g1h->gc_timer_cm());
2507     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2508 
2509     // The do_oop work routines of the keep_alive and drain_marking_stack
2510     // oop closures will set the has_overflown flag if we overflow the
2511     // global marking stack.
2512 
2513     assert(_markStack.overflow() || _markStack.isEmpty(),
2514             "mark stack should be empty (unless it overflowed)");
2515 
2516     if (_markStack.overflow()) {
2517       // This should have been done already when we tried to push an
2518       // entry on to the global mark stack. But let's do it again.
2519       set_has_overflown();
2520     }
2521 
2522     assert(rp->num_q() == active_workers, "why not");
2523 
2524     rp->enqueue_discovered_references(executor);
2525 
2526     rp->verify_no_references_recorded();
2527     assert(!rp->discovery_enabled(), "Post condition");


// Abandon the current marking iteration due to a Full GC. Tears down all
// in-progress concurrent-marking state so that the marking threads, when
// they next run, find nothing to do, then reports the end of the concurrent
// cycle to the tracing framework. The order of the resets below is
// significant; do not reorder.
void ConcurrentMark::abort() {
  // Clear all marks to force marking thread to do nothing
  _nextMarkBitMap->clearAll();
  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  // Reset each worker task's per-region state.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _has_aborted = true;

  // Discard any partially-filled SATB buffers and deactivate SATB queues
  // across all Java threads.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);

  // Emit the heap-after event and mark the concurrent cycle as ended for
  // the event-based tracing framework (added by JEP 167); an aborted cycle
  // must still be closed out so tracing state stays consistent.
  _g1h->trace_heap_after_concurrent_cycle();
  _g1h->register_concurrent_cycle_end();
}
3244 
3245 static void print_ms_time_info(const char* prefix, const char* name,
3246                                NumberSeq& ns) {
3247   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3248                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3249   if (ns.num() > 0) {
3250     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3251                            prefix, ns.sd(), ns.maximum());
3252   }
3253 }
3254 
3255 void ConcurrentMark::print_summary_info() {
3256   gclog_or_tty->print_cr(" Concurrent marking:");
3257   print_ms_time_info("  ", "init marks", _init_times);
3258   print_ms_time_info("  ", "remarks", _remark_times);
3259   {
3260     print_ms_time_info("     ", "final marks", _remark_mark_times);
3261     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3262