src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6323 : 8027553: Change the in_cset_fast_test functionality to use the G1BiasedArray abstraction
Summary: Instead of using a manually managed array for the in_cset_fast_test array, use a G1BiasedArray instance.
Reviewed-by: brutisso, mgerdin
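A minimal sketch of the biased-array idea behind this change, assuming a lookup keyed by heap address; the names below are illustrative, not the actual G1BiasedArray API:

template <class T>
class BiasedArraySketch {
  // Sketch only: bias the base pointer so that the slot for a heap address
  // is reachable with one shift and one indexed load, with no subtraction
  // of the heap's start address at lookup time.
  T*     _biased_base;  // == _base - (reserved_start >> _shift)
  size_t _shift;        // log2 of the bytes each slot covers (the region size)
 public:
  BiasedArraySketch(T* base, const void* reserved_start, size_t shift)
    : _biased_base(base - ((uintptr_t)reserved_start >> shift)), _shift(shift) {}
  T get_by_address(const void* addr) const {
    return _biased_base[(uintptr_t)addr >> _shift];
  }
};

With the in_cset_fast_test table stored this way, the hot is-in-collection-set check is a shift, an add and a load.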
rev 6326 : 8028710: G1 does not retire allocation buffers after reference processing work
Summary: With -XX:+ParallelRefProcEnabled, G1 does not retire allocation buffers after reference processing work. This leads to incorrectly calculated PLAB sizes, because the amount of wasted space is not updated correctly.
Reviewed-by: brutisso
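An illustrative sketch of the feedback loop this summary refers to; this is not the real PLABStats resizing formula, only its shape, and the threshold and growth factors below are invented:

// Sketch only: PLAB resizing is driven by per-GC allocation/waste statistics.
// If buffers are not retired, 'wasted' is under-reported and the next PLAB
// size is derived from bad data.
size_t next_plab_words(size_t allocated, size_t wasted, size_t cur_words) {
  double waste_ratio = (allocated == 0) ? 0.0 : (double)wasted / (double)allocated;
  if (waste_ratio > 0.25) {
    return cur_words / 2;             // heavy waste: shrink (invented factor)
  }
  return cur_words + cur_words / 8;   // low waste: grow modestly (invented factor)
}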
rev 6327 : 8019342: G1: High "Other" time most likely due to card redirtying
Summary: Parallelize card redirtying to decrease the time it takes.
Reviewed-by: brutisso
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

  81 // and allocate_new_tlab, which are the "entry" points to the
  82 // allocation code from the rest of the JVM.  (Note that this does not
  83 // apply to TLAB allocation, which is not part of this interface: it
  84 // is done by clients of this interface.)
  85 
  86 // Notes on implementation of parallelism in different tasks.
  87 //
  88 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  89 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  90 // It does use run_task() which sets _n_workers in the task.
  91 // G1ParTask executes g1_process_strong_roots() ->
  92 // SharedHeap::process_strong_roots() which eventually calls into
  93 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  94 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  95 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  96 //
  97 
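As a hedged illustration of the SubTasksDone claiming pattern mentioned above (MY_SUBTASK and process_my_roots() are hypothetical placeholders, not names from this file):

// Sketch only: each one-shot subtask is executed by exactly one worker of
// the gang; the first thread to claim the task id wins.
void do_strong_roots(SubTasksDone* tasks, uint worker_id) {
  if (!tasks->is_task_claimed(MY_SUBTASK)) {
    process_my_roots();           // runs once, on whichever worker claimed it
  }
  tasks->all_tasks_completed();   // rendezvous so the SubTasksDone can be reused
}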
  98 // Local to this file.
  99 
 100 class RefineCardTableEntryClosure: public CardTableEntryClosure {
 101   G1RemSet* _g1rs;
 102   ConcurrentG1Refine* _cg1r;
 103   bool _concurrent;
 104 public:
 105   RefineCardTableEntryClosure(G1RemSet* g1rs,
 106                               ConcurrentG1Refine* cg1r) :
 107     _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
 108   {}
 109   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 110     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
 111     // This path is executed by the concurrent refine or mutator threads,
 112     // concurrently, and so we do not care if card_ptr contains references
 113     // that point into the collection set.
 114     assert(!oops_into_cset, "should be");
 115 
 116     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 117       // Caller will actually yield.
 118       return false;
 119     }
 120     // Otherwise, we finished successfully; return true.
 121     return true;
 122   }
 123   void set_concurrent(bool b) { _concurrent = b; }
 124 };
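A hedged sketch of how a queue set drives a closure like this over one buffer of logged cards, modeled on DirtyCardQueue's buffer application but not copied from it:

// Sketch only: apply the closure to each card pointer in the buffer; a
// 'false' return (the yield request above) stops early so the caller can
// re-enqueue the unprocessed tail.
bool apply_to_buffer(CardTableEntryClosure* cl, void** buf,
                     size_t index, size_t sz, uint worker_i) {
  for (size_t i = index; i < sz; i++) {
    jbyte* card_ptr = (jbyte*)buf[i];
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      return false;
    }
  }
  return true;
}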
 125 
 126 
 127 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 128   int _calls;
 129   G1CollectedHeap* _g1h;
 130   CardTableModRefBS* _ctbs;
 131   int _histo[256];
 132 public:
 133   ClearLoggedCardTableEntryClosure() :
 134     _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
 135   {
 136     for (int i = 0; i < 256; i++) _histo[i] = 0;
 137   }
 138   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 139     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 140       _calls++;
 141       unsigned char* ujb = (unsigned char*)card_ptr;
 142       int ind = (int)(*ujb);


 458   if (hr == NULL) {
 459      // null
 460      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT, p));
 461      return false;
 462   } else {
 463     return !hr->isHumongous();
 464   }
 465 }
 466 
 467 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 468   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 469   CardTableModRefBS* ct_bs = g1_barrier_set();
 470 
 471   // Count the dirty cards at the start.
 472   CountNonCleanMemRegionClosure count1(this);
 473   ct_bs->mod_card_iterate(&count1);
 474   int orig_count = count1.n();
 475 
 476   // First clear the logged cards.
 477   ClearLoggedCardTableEntryClosure clear;
 478   dcqs.set_closure(&clear);
 479   dcqs.apply_closure_to_all_completed_buffers();
 480   dcqs.iterate_closure_all_threads(false);
 481   clear.print_histo();
 482 
 483   // Now ensure that there are no dirty cards.
 484   CountNonCleanMemRegionClosure count2(this);
 485   ct_bs->mod_card_iterate(&count2);
 486   if (count2.n() != 0) {
 487     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 488                            count2.n(), orig_count);
 489   }
 490   guarantee(count2.n() == 0, "Card table should be clean.");
 491 
 492   RedirtyLoggedCardTableEntryClosure redirty;
 493   JavaThread::dirty_card_queue_set().set_closure(&redirty);
 494   dcqs.apply_closure_to_all_completed_buffers();
 495   dcqs.iterate_closure_all_threads(false);
 496   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
 497                          clear.calls(), orig_count);
 498   guarantee(redirty.calls() == clear.calls(),
 499             "Or else mechanism is broken.");
 500 
 501   CountNonCleanMemRegionClosure count3(this);
 502   ct_bs->mod_card_iterate(&count3);
 503   if (count3.n() != orig_count) {
 504     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 505                            orig_count, count3.n());
 506     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 507   }
 508 
 509   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
 510 }
 511 
 512 // Private class members.
 513 
 514 G1CollectedHeap* G1CollectedHeap::_g1h;
 515 
 516 // Private methods.
 517 
 518 HeapRegion*
 519 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 520   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 521   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 522     if (!_secondary_free_list.is_empty()) {
 523       if (G1ConcRegionFreeingVerbose) {
 524         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 525                                "secondary_free_list has %u entries",
 526                                _secondary_free_list.length());
 527       }
 528       // It looks as if there are free regions available on the
 529       // secondary_free_list. Let's move them to the free_list and try


1983   // We have to initialize the printer before committing the heap, as
1984   // it will be used then.
1985   _hr_printer.set_active(G1PrintHeapRegions);
1986 
1987   // While there are no constraints in the GC code that HeapWordSize
1988   // be any particular value, there are multiple other areas in the
1989   // system which believe this to be true (e.g. oop->object_size in some
1990   // cases incorrectly returns the size in wordSize units rather than
1991   // HeapWordSize).
1992   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1993 
1994   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1995   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1996   size_t heap_alignment = collector_policy()->heap_alignment();
1997 
1998   // Ensure that the sizes are properly aligned.
1999   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
2000   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2001   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
2002 
2003   _cg1r = new ConcurrentG1Refine(this);
2004 
2005   // Reserve the maximum.
2006 
2007   // When compressed oops are enabled, the preferred heap base
2008   // is calculated by subtracting the requested size from the
2009   // 32Gb boundary and using the result as the base address for
2010   // heap reservation. If the requested size is not aligned to
2011   // HeapRegion::GrainBytes (i.e. the alignment that is passed
2012   // into the ReservedHeapSpace constructor) then the actual
2013   // base of the reserved heap may end up differing from the
2014   // address that was requested (i.e. the preferred heap base).
2015   // If this happens then we could end up using a non-optimal
2016   // compressed oops mode.
2017 
2018   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2019                                                  heap_alignment);
2020 
2021   // It is important to do this in a way such that concurrent readers can't
2022   // temporarily think something is in the heap.  (I've actually seen this
2023   // happen in asserts: DLD.)
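A worked example of the preferred-base computation described in the comment above, assuming zero-based compressed oops with a 32 GB encoding limit (the constant name is illustrative):

// Sketch only: place the heap so that it ends at the 32 GB boundary, the
// highest address still encodable as a 32-bit oop shifted by 3.
const uint64_t kOopEncodingLimit = 32ull * 1024 * 1024 * 1024; // 0x800000000
char* preferred_heap_base(size_t max_byte_size) {
  return (char*)(uintptr_t)(kOopEncodingLimit - max_byte_size);
}
// e.g. for a 4 GB maximum heap: 0x800000000 - 0x100000000 = 0x700000000.
// If max_byte_size is not GrainBytes-aligned, the reservation may come back
// at a different base, which is the non-optimal case the comment warns about.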


2078   // (Must do this late, so that "max_regions" is defined.)
2079   _cm = new ConcurrentMark(this, heap_rs);
2080   if (_cm == NULL || !_cm->completed_initialization()) {
2081     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2082     return JNI_ENOMEM;
2083   }
2084   _cmThread = _cm->cmThread();
2085 
2086   // Initialize the from_card cache structure of HeapRegionRemSet.
2087   HeapRegionRemSet::init_heap(max_regions());
2088 
2089   // Now expand into the initial heap size.
2090   if (!expand(init_byte_size)) {
2091     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2092     return JNI_ENOMEM;
2093   }
2094 
2095   // Perform any initialization actions delegated to the policy.
2096   g1_policy()->init();
2097 
2098   _refine_cte_cl =
2099     new RefineCardTableEntryClosure(g1_rem_set(),
2100                                     concurrent_g1_refine());
2101   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
2102 
2103   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2104                                                SATB_Q_FL_lock,
2105                                                G1SATBProcessCompletedThreshold,
2106                                                Shared_SATB_Q_lock);
2107 
2108   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2109                                                 DirtyCardQ_FL_lock,
2110                                                 concurrent_g1_refine()->yellow_zone(),
2111                                                 concurrent_g1_refine()->red_zone(),
2112                                                 Shared_DirtyCardQ_lock);
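A hedged sketch of what the yellow/red zone arguments passed here control; the green zone and the precise policy live in ConcurrentG1Refine, and the names below are illustrative:

// Sketch only: the count of completed dirty-card buffers decides who
// performs refinement work.
enum RefineAction { LeaveForGC, SomeRefineThreads, AllRefineThreads, MutatorMustHelp };
RefineAction classify(size_t n_buffers, size_t green, size_t yellow, size_t red) {
  if (n_buffers < green)  return LeaveForGC;         // cheap enough to defer to GC
  if (n_buffers < yellow) return SomeRefineThreads;  // partial concurrent refinement
  if (n_buffers < red)    return AllRefineThreads;   // all refinement threads active
  return MutatorMustHelp;                            // mutators process their own buffers
}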
2113 
2114   if (G1DeferredRSUpdate) {
2115     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2116                                       DirtyCardQ_FL_lock,
2117                                       -1, // never trigger processing
2118                                       -1, // no limit on length
2119                                       Shared_DirtyCardQ_lock,
2120                                       &JavaThread::dirty_card_queue_set());
2121   }
2122 
2123   // Initialize the card queue set used to hold cards containing
2124   // references into the collection set.
2125   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2126                                              DirtyCardQ_FL_lock,
2127                                              -1, // never trigger processing
2128                                              -1, // no limit on length
2129                                              Shared_DirtyCardQ_lock,
2130                                              &JavaThread::dirty_card_queue_set());
2131 
2132   // In case we're keeping closure specialization stats, initialize those
2133   // counts and that mechanism.
2134   SpecializationStats::clear();
2135 
2136   // Here we allocate the dummy full region that is required by the
2137   // G1AllocRegion class. If we don't pass an address in the reserved
2138   // space here, lots of asserts fire.
2139 
2140   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2141                                              _g1_reserved.start());
2142   // We'll re-use the same region whether the alloc region will
2143   // require BOT updates or not and, if it doesn't, then a non-young
2144   // region will complain that it cannot support allocations without
2145   // BOT updates. So we'll tag the dummy region as young to avoid that.


5267     set_par_threads(n_workers);
5268     workers()->run_task(&g1_unlink_task);
5269     set_par_threads(0);
5270   } else {
5271     g1_unlink_task.work(0);
5272   }
5273   if (G1TraceStringSymbolTableScrubbing) {
5274     gclog_or_tty->print_cr("Cleaned string and symbol table, "
5275                            "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5276                            "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5277                            g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5278                            g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5279   }
5280 
5281   if (G1StringDedup::is_enabled()) {
5282     G1StringDedup::unlink(is_alive);
5283   }
5284 }
5285 
5286 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
5287 public:
5288   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
5289     *card_ptr = CardTableModRefBS::dirty_card_val();
5290     return true;
5291   }
5292 };
5293 
5294 void G1CollectedHeap::redirty_logged_cards() {
5295   guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
5296   double redirty_logged_cards_start = os::elapsedTime();
5297 
5298   RedirtyLoggedCardTableEntryFastClosure redirty;
5299   dirty_card_queue_set().set_closure(&redirty);
5300   dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5301 
5302   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5303   dcq.merge_bufferlists(&dirty_card_queue_set());
5304   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5305 
5306   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5307 }
5308 
5309 // Weak Reference Processing support
5310 
5311 // An always "is_alive" closure that is used to preserve referents.
5312 // If the object is non-null then it's alive.  Used in the preservation
5313 // of referent objects that are pointed to by reference objects
5314 // discovered by the CM ref processor.
5315 class G1AlwaysAliveClosure: public BoolObjectClosure {
5316   G1CollectedHeap* _g1;
5317 public:
5318   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5319   bool do_object_b(oop p) {
5320     if (p != NULL) {

+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

  81 // and allocate_new_tlab, which are the "entry" points to the
  82 // allocation code from the rest of the JVM.  (Note that this does not
  83 // apply to TLAB allocation, which is not part of this interface: it
  84 // is done by clients of this interface.)
  85 
  86 // Notes on implementation of parallelism in different tasks.
  87 //
  88 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  89 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  90 // It does use run_task() which sets _n_workers in the task.
  91 // G1ParTask executes g1_process_strong_roots() ->
  92 // SharedHeap::process_strong_roots() which eventually calls into
  93 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  94 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  95 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  96 //
  97 
  98 // Local to this file.
  99 
 100 class RefineCardTableEntryClosure: public CardTableEntryClosure {
 101   bool _concurrent;
 102 public:
 103   RefineCardTableEntryClosure() : _concurrent(true) { }
 104 
 105   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 106     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 107     // This path is executed by the concurrent refine or mutator threads,
 108     // concurrently, and so we do not care if card_ptr contains references
 109     // that point into the collection set.
 110     assert(!oops_into_cset, "should be");
 111 
 112     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 113       // Caller will actually yield.
 114       return false;
 115     }
 116     // Otherwise, we finished successfully; return true.
 117     return true;
 118   }
 119 
 120   void set_concurrent(bool b) { _concurrent = b; }
 121 };
 122 
 123 
 124 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 125   int _calls;
 126   G1CollectedHeap* _g1h;
 127   CardTableModRefBS* _ctbs;
 128   int _histo[256];
 129 public:
 130   ClearLoggedCardTableEntryClosure() :
 131     _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
 132   {
 133     for (int i = 0; i < 256; i++) _histo[i] = 0;
 134   }
 135   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 136     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 137       _calls++;
 138       unsigned char* ujb = (unsigned char*)card_ptr;
 139       int ind = (int)(*ujb);


 455   if (hr == NULL) {
 456      // null
 457      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT, p));
 458      return false;
 459   } else {
 460     return !hr->isHumongous();
 461   }
 462 }
 463 
 464 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 465   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 466   CardTableModRefBS* ct_bs = g1_barrier_set();
 467 
 468   // Count the dirty cards at the start.
 469   CountNonCleanMemRegionClosure count1(this);
 470   ct_bs->mod_card_iterate(&count1);
 471   int orig_count = count1.n();
 472 
 473   // First clear the logged cards.
 474   ClearLoggedCardTableEntryClosure clear;
 475   dcqs.apply_closure_to_all_completed_buffers(&clear);
 476   dcqs.iterate_closure_all_threads(&clear, false);
 477   clear.print_histo();
 478 
 479   // Now ensure that there are no dirty cards.
 480   CountNonCleanMemRegionClosure count2(this);
 481   ct_bs->mod_card_iterate(&count2);
 482   if (count2.n() != 0) {
 483     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 484                            count2.n(), orig_count);
 485   }
 486   guarantee(count2.n() == 0, "Card table should be clean.");
 487 
 488   RedirtyLoggedCardTableEntryClosure redirty;
 489   dcqs.apply_closure_to_all_completed_buffers(&redirty);
 490   dcqs.iterate_closure_all_threads(&redirty, false);
 491   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
 492                          clear.calls(), orig_count);
 493   guarantee(redirty.calls() == clear.calls(),
 494             "Or else mechanism is broken.");
 495 
 496   CountNonCleanMemRegionClosure count3(this);
 497   ct_bs->mod_card_iterate(&count3);
 498   if (count3.n() != orig_count) {
 499     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 500                            orig_count, count3.n());
 501     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 502   }
 503 }
 504 
 505 // Private class members.
 506 
 507 G1CollectedHeap* G1CollectedHeap::_g1h;
 508 
 509 // Private methods.
 510 
 511 HeapRegion*
 512 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 513   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 514   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 515     if (!_secondary_free_list.is_empty()) {
 516       if (G1ConcRegionFreeingVerbose) {
 517         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 518                                "secondary_free_list has %u entries",
 519                                _secondary_free_list.length());
 520       }
 521       // It looks as if there are free regions available on the
 522       // secondary_free_list. Let's move them to the free_list and try


1976   // We have to initialize the printer before committing the heap, as
1977   // it will be used then.
1978   _hr_printer.set_active(G1PrintHeapRegions);
1979 
1980   // While there are no constraints in the GC code that HeapWordSize
1981   // be any particular value, there are multiple other areas in the
1982   // system which believe this to be true (e.g. oop->object_size in some
1983   // cases incorrectly returns the size in wordSize units rather than
1984   // HeapWordSize).
1985   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1986 
1987   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1988   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1989   size_t heap_alignment = collector_policy()->heap_alignment();
1990 
1991   // Ensure that the sizes are properly aligned.
1992   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1993   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1994   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1995 
1996   _refine_cte_cl = new RefineCardTableEntryClosure();
1997 
1998   _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);
1999 
2000   // Reserve the maximum.
2001 
2002   // When compressed oops are enabled, the preferred heap base
2003   // is calculated by subtracting the requested size from the
2004   // 32Gb boundary and using the result as the base address for
2005   // heap reservation. If the requested size is not aligned to
2006   // HeapRegion::GrainBytes (i.e. the alignment that is passed
2007   // into the ReservedHeapSpace constructor) then the actual
2008   // base of the reserved heap may end up differing from the
2009   // address that was requested (i.e. the preferred heap base).
2010   // If this happens then we could end up using a non-optimal
2011   // compressed oops mode.
2012 
2013   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2014                                                  heap_alignment);
2015 
2016   // It is important to do this in a way such that concurrent readers can't
2017   // temporarily think something is in the heap.  (I've actually seen this
2018   // happen in asserts: DLD.)


2073   // (Must do this late, so that "max_regions" is defined.)
2074   _cm = new ConcurrentMark(this, heap_rs);
2075   if (_cm == NULL || !_cm->completed_initialization()) {
2076     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2077     return JNI_ENOMEM;
2078   }
2079   _cmThread = _cm->cmThread();
2080 
2081   // Initialize the from_card cache structure of HeapRegionRemSet.
2082   HeapRegionRemSet::init_heap(max_regions());
2083 
2084   // Now expand into the initial heap size.
2085   if (!expand(init_byte_size)) {
2086     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2087     return JNI_ENOMEM;
2088   }
2089 
2090   // Perform any initialization actions delegated to the policy.
2091   g1_policy()->init();
2092 
2093   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2094                                                SATB_Q_FL_lock,
2095                                                G1SATBProcessCompletedThreshold,
2096                                                Shared_SATB_Q_lock);
2097 
2098   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
2099                                                 DirtyCardQ_CBL_mon,
2100                                                 DirtyCardQ_FL_lock,
2101                                                 concurrent_g1_refine()->yellow_zone(),
2102                                                 concurrent_g1_refine()->red_zone(),
2103                                                 Shared_DirtyCardQ_lock);
2104 
2105   if (G1DeferredRSUpdate) {
2106     dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
2107                                       DirtyCardQ_CBL_mon,
2108                                       DirtyCardQ_FL_lock,
2109                                       -1, // never trigger processing
2110                                       -1, // no limit on length
2111                                       Shared_DirtyCardQ_lock,
2112                                       &JavaThread::dirty_card_queue_set());
2113   }
2114 
2115   // Initialize the card queue set used to hold cards containing
2116   // references into the collection set.
2117   _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2118                                              DirtyCardQ_CBL_mon,
2119                                              DirtyCardQ_FL_lock,
2120                                              -1, // never trigger processing
2121                                              -1, // no limit on length
2122                                              Shared_DirtyCardQ_lock,
2123                                              &JavaThread::dirty_card_queue_set());
2124 
2125   // In case we're keeping closure specialization stats, initialize those
2126   // counts and that mechanism.
2127   SpecializationStats::clear();
2128 
2129   // Here we allocate the dummy full region that is required by the
2130   // G1AllocRegion class. If we don't pass an address in the reserved
2131   // space here, lots of asserts fire.
2132 
2133   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2134                                              _g1_reserved.start());
2135   // We'll re-use the same region whether the alloc region will
2136   // require BOT updates or not and, if it doesn't, then a non-young
2137   // region will complain that it cannot support allocations without
2138   // BOT updates. So we'll tag the dummy region as young to avoid that.


5260     set_par_threads(n_workers);
5261     workers()->run_task(&g1_unlink_task);
5262     set_par_threads(0);
5263   } else {
5264     g1_unlink_task.work(0);
5265   }
5266   if (G1TraceStringSymbolTableScrubbing) {
5267     gclog_or_tty->print_cr("Cleaned string and symbol table, "
5268                            "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5269                            "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5270                            g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5271                            g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5272   }
5273 
5274   if (G1StringDedup::is_enabled()) {
5275     G1StringDedup::unlink(is_alive);
5276   }
5277 }
5278 
5279 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
5280  private:
5281   size_t _num_processed;
5282 
5283  public:
5284   RedirtyLoggedCardTableEntryFastClosure() : CardTableEntryClosure(), _num_processed(0) { }
5285 
5286   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
5287     *card_ptr = CardTableModRefBS::dirty_card_val();
5288     _num_processed++;
5289     return true;
5290   }
5291 
5292   size_t num_processed() const { return _num_processed; }
5293 };
5294 
5295 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5296  private:
5297   DirtyCardQueueSet* _queue;
5298  public:
5299   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5300 
5301   virtual void work(uint worker_id) {
5302     double start_time = os::elapsedTime();
5303 
5304     RedirtyLoggedCardTableEntryFastClosure cl;
5305     if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5306       _queue->par_apply_closure_to_all_completed_buffers(&cl);
5307     } else {
5308       _queue->apply_closure_to_all_completed_buffers(&cl);
5309     }
5310 
5311     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5312     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5313     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5314   }
5315 };
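A hedged sketch of how par_apply_closure_to_all_completed_buffers can partition buffers among the gang; this mirrors the usual HotSpot CAS-claiming idiom rather than the verbatim DirtyCardQueueSet code:

// Sketch only: each worker atomically claims the head of the shared
// completed-buffer list, so the gang divides the work without a lock.
BufferNode* claim_completed_buffer(BufferNode* volatile* list_head) {
  while (true) {
    BufferNode* head = *list_head;
    if (head == NULL) {
      return NULL;    // nothing left to claim
    }
    if (Atomic::cmpxchg_ptr(head->next(), list_head, head) == head) {
      return head;    // this worker now owns the buffer
    }
  }
}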
5316 
5317 void G1CollectedHeap::redirty_logged_cards() {
5318   guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
5319   double redirty_logged_cards_start = os::elapsedTime();
5320 
5321   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5322                    _g1h->workers()->active_workers() : 1);
5323 
5324   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5325   dirty_card_queue_set().reset_for_par_iteration();
5326   if (use_parallel_gc_threads()) {
5327     set_par_threads(n_workers);
5328     workers()->run_task(&redirty_task);
5329     set_par_threads(0);
5330   } else {
5331     redirty_task.work(0);
5332   }
5333 
5334   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5335   dcq.merge_bufferlists(&dirty_card_queue_set());
5336   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5337 
5338   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5339 }
5340 
5341 // Weak Reference Processing support
5342 
5343 // An always "is_alive" closure that is used to preserve referents.
5344 // If the object is non-null then it's alive.  Used in the preservation
5345 // of referent objects that are pointed to by reference objects
5346 // discovered by the CM ref processor.
5347 class G1AlwaysAliveClosure: public BoolObjectClosure {
5348   G1CollectedHeap* _g1;
5349 public:
5350   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5351   bool do_object_b(oop p) {
5352     if (p != NULL) {