src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6279 : imported patch fast-cset-uses-biasedarray
rev 6282 : 8028710: G1 does not retire allocation buffers after reference processing work
Summary: Retire allocation buffers after reference processing work when -XX:+ParallelRefProcEnabled is enabled.
Reviewed-by:
rev 6283 : 8019342: G1: High "Other" time most likely due to card redirtying
Summary: Parallelize card redirtying to decrease the time it takes.
Reviewed-by: tbd, tbd
rev 6284 : [mq]: fixes-cleanup


  75 // and allocate_new_tlab, which are the "entry" points to the
  76 // allocation code from the rest of the JVM.  (Note that this does not
  77 // apply to TLAB allocation, which is not part of this interface: it
  78 // is done by clients of this interface.)
  79 
  80 // Notes on implementation of parallelism in different tasks.
  81 //
  82 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  83 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  84 // It uses run_task(), which sets _n_workers in the task.
  85 // G1ParTask executes g1_process_strong_roots() ->
  86 // SharedHeap::process_strong_roots(), which eventually calls into
  87 // CardTableModRefBS::par_non_clean_card_iterate_work(), which uses
  88 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  89 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  90 //
  91 
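
[Not part of this file] For readers unfamiliar with the SubTasksDone mechanism
mentioned above, here is a minimal standalone sketch of its claiming idiom:
each worker walks all subtask ids and executes only those it claims first, so
every subtask runs exactly once. DemoSubTasks is a hypothetical name; HotSpot's
real class lives in utilities/workgroup.hpp and uses Atomic::cmpxchg rather
than <atomic>.

#include <atomic>

struct DemoSubTasks {
  std::atomic<bool> _claimed[8];
  DemoSubTasks() { for (int i = 0; i < 8; i++) _claimed[i].store(false); }
  // Returns true for exactly one caller per task id.
  bool try_claim(int task) {
    bool expected = false;
    return _claimed[task].compare_exchange_strong(expected, true);
  }
};
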
  92 // Local to this file.
  93 
  94 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  95   G1RemSet* _g1rs;
  96   ConcurrentG1Refine* _cg1r;
  97   bool _concurrent;
  98 public:
  99   RefineCardTableEntryClosure(G1RemSet* g1rs,
 100                               ConcurrentG1Refine* cg1r) :
 101     _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
 102   {}
 103   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 104     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
 105     // This path is executed concurrently by the concurrent refinement
 106     // and mutator threads, and so we do not care whether card_ptr
 107     // contains references that point into the collection set.
 108     assert(!oops_into_cset, "should be");
 109 
 110     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 111       // Caller will actually yield.
 112       return false;
 113     }
 114     // Otherwise, we finished successfully; return true.
 115     return true;
 116   }
 117   void set_concurrent(bool b) { _concurrent = b; }
 118 };
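
A note on the do_card_ptr() contract above: returning false does not signal
failure, it signals "stop iterating, the caller will yield to the safepoint
and resume later". A sketch of a driver loop honoring that contract
(process_buffer is a hypothetical helper, not a DirtyCardQueueSet method):

// Illustrative only: apply a closure to each card in a buffer, stopping
// early if the closure asks its caller to yield.
static bool process_buffer(jbyte** cards, size_t n,
                           CardTableEntryClosure* cl, uint worker_i) {
  for (size_t i = 0; i < n; i++) {
    if (!cl->do_card_ptr(cards[i], worker_i)) {
      return false; // closure requested a yield; caller requeues the rest
    }
  }
  return true;
}
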
 119 
 120 
 121 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 122   int _calls;
 123   G1CollectedHeap* _g1h;
 124   CardTableModRefBS* _ctbs;
 125   int _histo[256];
 126 public:
 127   ClearLoggedCardTableEntryClosure() :
 128     _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
 129   {
 130     for (int i = 0; i < 256; i++) _histo[i] = 0;
 131   }
 132   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 133     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 134       _calls++;
 135       unsigned char* ujb = (unsigned char*)card_ptr;
 136       int ind = (int)(*ujb);


 460   if (hr == NULL) {
 461      // null
 462      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT, p));
 463      return false;
 464   } else {
 465     return !hr->isHumongous();
 466   }
 467 }
 468 
 469 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 470   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 471   CardTableModRefBS* ct_bs = g1_barrier_set();
 472 
 473   // Count the dirty cards at the start.
 474   CountNonCleanMemRegionClosure count1(this);
 475   ct_bs->mod_card_iterate(&count1);
 476   int orig_count = count1.n();
 477 
 478   // First clear the logged cards.
 479   ClearLoggedCardTableEntryClosure clear;
 480   dcqs.set_closure(&clear);
 481   dcqs.apply_closure_to_all_completed_buffers();
 482   dcqs.iterate_closure_all_threads(false);
 483   clear.print_histo();
 484 
 485   // Now ensure that there are no dirty cards.
 486   CountNonCleanMemRegionClosure count2(this);
 487   ct_bs->mod_card_iterate(&count2);
 488   if (count2.n() != 0) {
 489     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 490                            count2.n(), orig_count);
 491   }
 492   guarantee(count2.n() == 0, "Card table should be clean.");
 493 
 494   RedirtyLoggedCardTableEntryClosure redirty;
 495   JavaThread::dirty_card_queue_set().set_closure(&redirty);
 496   dcqs.apply_closure_to_all_completed_buffers();
 497   dcqs.iterate_closure_all_threads(false);
 498   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
 499                          clear.calls(), orig_count);
 500   guarantee(redirty.calls() == clear.calls(),
 501             "Or else mechanism is broken.");
 502 
 503   CountNonCleanMemRegionClosure count3(this);
 504   ct_bs->mod_card_iterate(&count3);
 505   if (count3.n() != orig_count) {
 506     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 507                            orig_count, count3.n());
 508     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 509   }
 510 
 511   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
 512 }
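
check_ct_logs_at_safepoint() exercises a round trip: clearing every logged card
must leave the table fully clean, and redirtying the same log entries must
restore at least the original dirty count. The same invariant as a standalone
demo, assuming the usual CardTableModRefBS values of dirty == 0 and
clean == -1 (demo code only, not part of this file):

#include <assert.h>

static void demo_clear_redirty_round_trip() {
  signed char cards[4]   = { 0, -1, 0, -1 };          // two dirty, two clean
  signed char* logged[2] = { &cards[0], &cards[2] };  // dirty cards were logged
  for (int i = 0; i < 2; i++) *logged[i] = -1;        // clear phase
  assert(cards[0] == -1 && cards[2] == -1);           // table is now clean
  for (int i = 0; i < 2; i++) *logged[i] = 0;         // redirty phase
  assert(cards[0] == 0 && cards[2] == 0);             // originals restored
}
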
 513 
 514 // Private class members.
 515 
 516 G1CollectedHeap* G1CollectedHeap::_g1h;
 517 
 518 // Private methods.
 519 
 520 HeapRegion*
 521 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 522   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 523   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 524     if (!_secondary_free_list.is_empty()) {
 525       if (G1ConcRegionFreeingVerbose) {
 526         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 527                                "secondary_free_list has %u entries",
 528                                _secondary_free_list.length());
 529       }
 530       // It looks as if there are free regions available on the
 531       // secondary_free_list. Let's move them to the free_list and try


1985   // We have to initialize the printer before committing the heap, as
1986   // it will be used then.
1987   _hr_printer.set_active(G1PrintHeapRegions);
1988 
1989   // While there are no constraints in the GC code that HeapWordSize
1990   // be any particular value, there are multiple other areas in the
1991   // system which believe this to be true (e.g. oop->object_size in some
1992   // cases incorrectly returns the size in wordSize units rather than
1993   // HeapWordSize).
1994   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1995 
1996   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1997   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1998   size_t heap_alignment = collector_policy()->heap_alignment();
1999 
2000   // Ensure that the sizes are properly aligned.
2001   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
2002   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2003   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
2004 
2005   _cg1r = new ConcurrentG1Refine(this);
2006 
2007   // Reserve the maximum.
2008 
2009   // When compressed oops are enabled, the preferred heap base
2010   // is calculated by subtracting the requested size from the
2011   // 32Gb boundary and using the result as the base address for
2012   // heap reservation. If the requested size is not aligned to
2013   // HeapRegion::GrainBytes (i.e. the alignment that is passed
2014   // into the ReservedHeapSpace constructor) then the actual
2015   // base of the reserved heap may end up differing from the
2016   // address that was requested (i.e. the preferred heap base).
2017   // If this happens then we could end up using a non-optimal
2018   // compressed oops mode.
2019 
2020   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2021                                                  heap_alignment);
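
To make the comment above concrete, the preferred-base arithmetic looks like
this (illustrative only; the actual request is assembled inside
Universe::reserve_heap()):

//   const size_t G = 1024ULL * 1024 * 1024;
//   char* preferred_base = (char*)(32 * G - max_byte_size);
//   // e.g. max_byte_size = 4 Gb  =>  preferred_base == (char*)0x700000000
// A heap that ends at or below the 32 Gb boundary can use zero-based
// compressed oops; if the OS returns a different base, a less efficient
// (heap-based, shifted) encoding may be selected instead.
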
2022 
2023   // It is important to do this in a way such that concurrent readers can't
2024   // temporarily think something is in the heap.  (I've actually seen this
2025   // happen in asserts: DLD.)


2080   // (Must do this late, so that "max_regions" is defined.)
2081   _cm = new ConcurrentMark(this, heap_rs);
2082   if (_cm == NULL || !_cm->completed_initialization()) {
2083     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2084     return JNI_ENOMEM;
2085   }
2086   _cmThread = _cm->cmThread();
2087 
2088   // Initialize the from_card cache structure of HeapRegionRemSet.
2089   HeapRegionRemSet::init_heap(max_regions());
2090 
2091   // Now expand into the initial heap size.
2092   if (!expand(init_byte_size)) {
2093     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2094     return JNI_ENOMEM;
2095   }
2096 
2097   // Perform any initialization actions delegated to the policy.
2098   g1_policy()->init();
2099 
2100   _refine_cte_cl =
2101     new RefineCardTableEntryClosure(g1_rem_set(),
2102                                     concurrent_g1_refine());
2103   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
2104 
2105   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2106                                                SATB_Q_FL_lock,
2107                                                G1SATBProcessCompletedThreshold,
2108                                                Shared_SATB_Q_lock);
2109 
2110   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2111                                                 DirtyCardQ_FL_lock,
2112                                                 concurrent_g1_refine()->yellow_zone(),
2113                                                 concurrent_g1_refine()->red_zone(),
2114                                                 Shared_DirtyCardQ_lock);
2115 
2116   if (G1DeferredRSUpdate) {
2117     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2118                                       DirtyCardQ_FL_lock,
2119                                       -1, // never trigger processing
2120                                       -1, // no limit on length
2121                                       Shared_DirtyCardQ_lock,
2122                                       &JavaThread::dirty_card_queue_set());
2123   }
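
The -1 arguments above are sentinels: they disable both the processing trigger
and the length cap, so this queue set only accumulates buffers until GC drains
it explicitly. A minimal model of how such a threshold could gate processing
(should_process and its parameters are illustrative, not the DirtyCardQueueSet
API):

// Illustrative only.
static bool should_process(int completed_buffers, int process_threshold) {
  // A negative threshold can never be exceeded, so -1 disables processing.
  return process_threshold >= 0 && completed_buffers > process_threshold;
}
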
2124 
2125   // Initialize the card queue set used to hold cards containing
2126   // references into the collection set.
2127   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2128                                              DirtyCardQ_FL_lock,
2129                                              -1, // never trigger processing
2130                                              -1, // no limit on length
2131                                              Shared_DirtyCardQ_lock,
2132                                              &JavaThread::dirty_card_queue_set());
2133 
2134   // In case we're keeping closure specialization stats, initialize those
2135   // counts and that mechanism.
2136   SpecializationStats::clear();
2137 
2138   // Here we allocate the dummy full region that is required by the
2139   // G1AllocRegion class. If we don't pass an address in the reserved
2140   // space here, lots of asserts fire.
2141 
2142   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2143                                              _g1_reserved.start());
2144   // We'll re-use the same region whether the alloc region will
2145   // require BOT updates or not and, if it doesn't, then a non-young
2146   // region will complain that it cannot support allocations without
2147   // BOT updates. So we'll tag the dummy region as young to avoid that.


5245     set_par_threads(n_workers);
5246     workers()->run_task(&g1_unlink_task);
5247     set_par_threads(0);
5248   } else {
5249     g1_unlink_task.work(0);
5250   }
5251   if (G1TraceStringSymbolTableScrubbing) {
5252     gclog_or_tty->print_cr("Cleaned string and symbol table, "
5253                            "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5254                            "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5255                            g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5256                            g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5257   }
5258 
5259   if (G1StringDedup::is_enabled()) {
5260     G1StringDedup::unlink(is_alive);
5261   }
5262 }
5263 
5264 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
5265 public:
5266   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
5267     *card_ptr = CardTableModRefBS::dirty_card_val();
5268     return true;
5269   }
5270 };
5271 
5272 void G1CollectedHeap::redirty_logged_cards() {
5273   guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
5274   double redirty_logged_cards_start = os::elapsedTime();
5275 
5276   RedirtyLoggedCardTableEntryFastClosure redirty;
5277   dirty_card_queue_set().set_closure(&redirty);
5278   dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5279 
5280   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5281   dcq.merge_bufferlists(&dirty_card_queue_set());
5282   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5283 
5284   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5285 }
5286 
5287 // Weak Reference Processing support
5288 
5289 // An always "is_alive" closure that is used to preserve referents.
5290 // If the object is non-null then it's alive.  Used in the preservation
5291 // of referent objects that are pointed to by reference objects
5292 // discovered by the CM ref processor.
5293 class G1AlwaysAliveClosure: public BoolObjectClosure {
5294   G1CollectedHeap* _g1;
5295 public:
5296   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5297   bool do_object_b(oop p) {
5298     if (p != NULL) {


The same sections of the file follow again as they appear after the patches above are applied.


  75 // and allocate_new_tlab, which are the "entry" points to the
  76 // allocation code from the rest of the JVM.  (Note that this does not
  77 // apply to TLAB allocation, which is not part of this interface: it
  78 // is done by clients of this interface.)
  79 
  80 // Notes on implementation of parallelism in different tasks.
  81 //
  82 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  83 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  84 // It uses run_task(), which sets _n_workers in the task.
  85 // G1ParTask executes g1_process_strong_roots() ->
  86 // SharedHeap::process_strong_roots(), which eventually calls into
  87 // CardTableModRefBS::par_non_clean_card_iterate_work(), which uses
  88 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  89 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  90 //
  91 
  92 // Local to this file.
  93 
  94 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  95   bool _concurrent;
  96 public:
  97   RefineCardTableEntryClosure() : _concurrent(true) { }
  98 
  99   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 100     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 101     // This path is executed concurrently by the concurrent refinement
 102     // and mutator threads, and so we do not care whether card_ptr
 103     // contains references that point into the collection set.
 104     assert(!oops_into_cset, "should be");
 105 
 106     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 107       // Caller will actually yield.
 108       return false;
 109     }
 110     // Otherwise, we finished successfully; return true.
 111     return true;
 112   }
 113 
 114   void set_concurrent(bool b) { _concurrent = b; }
 115 };
 116 
 117 
 118 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 119   int _calls;
 120   G1CollectedHeap* _g1h;
 121   CardTableModRefBS* _ctbs;
 122   int _histo[256];
 123 public:
 124   ClearLoggedCardTableEntryClosure() :
 125     _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
 126   {
 127     for (int i = 0; i < 256; i++) _histo[i] = 0;
 128   }
 129   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 130     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 131       _calls++;
 132       unsigned char* ujb = (unsigned char*)card_ptr;
 133       int ind = (int)(*ujb);


 457   if (hr == NULL) {
 458      // null
 459      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT, p));
 460      return false;
 461   } else {
 462     return !hr->isHumongous();
 463   }
 464 }
 465 
 466 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 467   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 468   CardTableModRefBS* ct_bs = g1_barrier_set();
 469 
 470   // Count the dirty cards at the start.
 471   CountNonCleanMemRegionClosure count1(this);
 472   ct_bs->mod_card_iterate(&count1);
 473   int orig_count = count1.n();
 474 
 475   // First clear the logged cards.
 476   ClearLoggedCardTableEntryClosure clear;
 477   dcqs.apply_closure_to_all_completed_buffers(&clear);
 478   dcqs.iterate_closure_all_threads(&clear, false);
 479   clear.print_histo();
 480 
 481   // Now ensure that there are no dirty cards.
 482   CountNonCleanMemRegionClosure count2(this);
 483   ct_bs->mod_card_iterate(&count2);
 484   if (count2.n() != 0) {
 485     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 486                            count2.n(), orig_count);
 487   }
 488   guarantee(count2.n() == 0, "Card table should be clean.");
 489 
 490   RedirtyLoggedCardTableEntryClosure redirty;
 491   dcqs.apply_closure_to_all_completed_buffers(&redirty);
 492   dcqs.iterate_closure_all_threads(&redirty, false);
 493   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
 494                          clear.calls(), orig_count);
 495   guarantee(redirty.calls() == clear.calls(),
 496             "Or else mechanism is broken.");
 497 
 498   CountNonCleanMemRegionClosure count3(this);
 499   ct_bs->mod_card_iterate(&count3);
 500   if (count3.n() != orig_count) {
 501     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 502                            orig_count, count3.n());
 503     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 504   }
 505 }
 506 
 507 // Private class members.
 508 
 509 G1CollectedHeap* G1CollectedHeap::_g1h;
 510 
 511 // Private methods.
 512 
 513 HeapRegion*
 514 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 515   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 516   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 517     if (!_secondary_free_list.is_empty()) {
 518       if (G1ConcRegionFreeingVerbose) {
 519         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 520                                "secondary_free_list has %u entries",
 521                                _secondary_free_list.length());
 522       }
 523       // It looks as if there are free regions available on the
 524       // secondary_free_list. Let's move them to the free_list and try


1978   // We have to initialize the printer before committing the heap, as
1979   // it will be used then.
1980   _hr_printer.set_active(G1PrintHeapRegions);
1981 
1982   // While there are no constraints in the GC code that HeapWordSize
1983   // be any particular value, there are multiple other areas in the
1984   // system which believe this to be true (e.g. oop->object_size in some
1985   // cases incorrectly returns the size in wordSize units rather than
1986   // HeapWordSize).
1987   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1988 
1989   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1990   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1991   size_t heap_alignment = collector_policy()->heap_alignment();
1992 
1993   // Ensure that the sizes are properly aligned.
1994   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1995   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1996   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1997 
1998   _refine_cte_cl = new RefineCardTableEntryClosure();
1999 
2000   _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);
2001 
2002   // Reserve the maximum.
2003 
2004   // When compressed oops are enabled, the preferred heap base
2005   // is calculated by subtracting the requested size from the
2006   // 32Gb boundary and using the result as the base address for
2007   // heap reservation. If the requested size is not aligned to
2008   // HeapRegion::GrainBytes (i.e. the alignment that is passed
2009   // into the ReservedHeapSpace constructor) then the actual
2010   // base of the reserved heap may end up differing from the
2011   // address that was requested (i.e. the preferred heap base).
2012   // If this happens then we could end up using a non-optimal
2013   // compressed oops mode.
2014 
2015   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2016                                                  heap_alignment);
2017 
2018   // It is important to do this in a way such that concurrent readers can't
2019   // temporarily think something is in the heap.  (I've actually seen this
2020   // happen in asserts: DLD.)


2075   // (Must do this late, so that "max_regions" is defined.)
2076   _cm = new ConcurrentMark(this, heap_rs);
2077   if (_cm == NULL || !_cm->completed_initialization()) {
2078     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2079     return JNI_ENOMEM;
2080   }
2081   _cmThread = _cm->cmThread();
2082 
2083   // Initialize the from_card cache structure of HeapRegionRemSet.
2084   HeapRegionRemSet::init_heap(max_regions());
2085 
2086   // Now expand into the initial heap size.
2087   if (!expand(init_byte_size)) {
2088     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2089     return JNI_ENOMEM;
2090   }
2091 
2092   // Perform any initialization actions delegated to the policy.
2093   g1_policy()->init();
2094 
2095   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2096                                                SATB_Q_FL_lock,
2097                                                G1SATBProcessCompletedThreshold,
2098                                                Shared_SATB_Q_lock);
2099 
2100   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
2101                                                 DirtyCardQ_CBL_mon,
2102                                                 DirtyCardQ_FL_lock,
2103                                                 concurrent_g1_refine()->yellow_zone(),
2104                                                 concurrent_g1_refine()->red_zone(),
2105                                                 Shared_DirtyCardQ_lock);
2106 
2107   if (G1DeferredRSUpdate) {
2108     dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
2109                                       DirtyCardQ_CBL_mon,
2110                                       DirtyCardQ_FL_lock,
2111                                       -1, // never trigger processing
2112                                       -1, // no limit on length
2113                                       Shared_DirtyCardQ_lock,
2114                                       &JavaThread::dirty_card_queue_set());
2115   }
2116 
2117   // Initialize the card queue set used to hold cards containing
2118   // references into the collection set.
2119   _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2120                                              DirtyCardQ_CBL_mon,
2121                                              DirtyCardQ_FL_lock,
2122                                              -1, // never trigger processing
2123                                              -1, // no limit on length
2124                                              Shared_DirtyCardQ_lock,
2125                                              &JavaThread::dirty_card_queue_set());
2126 
2127   // In case we're keeping closure specialization stats, initialize those
2128   // counts and that mechanism.
2129   SpecializationStats::clear();
2130 
2131   // Here we allocate the dummy full region that is required by the
2132   // G1AllocRegion class. If we don't pass an address in the reserved
2133   // space here, lots of asserts fire.
2134 
2135   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2136                                              _g1_reserved.start());
2137   // We'll re-use the same region whether the alloc region will
2138   // require BOT updates or not and, if it doesn't, then a non-young
2139   // region will complain that it cannot support allocations without
2140   // BOT updates. So we'll tag the dummy region as young to avoid that.


5238     set_par_threads(n_workers);
5239     workers()->run_task(&g1_unlink_task);
5240     set_par_threads(0);
5241   } else {
5242     g1_unlink_task.work(0);
5243   }
5244   if (G1TraceStringSymbolTableScrubbing) {
5245     gclog_or_tty->print_cr("Cleaned string and symbol table, "
5246                            "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5247                            "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5248                            g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5249                            g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5250   }
5251 
5252   if (G1StringDedup::is_enabled()) {
5253     G1StringDedup::unlink(is_alive);
5254   }
5255 }
5256 
5257 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
5258  private:
5259   size_t _num_processed;
5260 
5261  public:
5262   RedirtyLoggedCardTableEntryFastClosure() : CardTableEntryClosure(), _num_processed(0) { }
5263 
5264   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
5265     *card_ptr = CardTableModRefBS::dirty_card_val();
5266     _num_processed++;
5267     return true;
5268   }
5269 
5270   size_t num_processed() const { return _num_processed; }
5271 };
5272 
5273 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5274  private:
5275   DirtyCardQueueSet* _queue;
5276  public:
5277   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5278 
5279   virtual void work(uint worker_id) {
5280     double start_time = os::elapsedTime();
5281 
5282     RedirtyLoggedCardTableEntryFastClosure cl;
5283     if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5284       _queue->par_apply_closure_to_all_completed_buffers(&cl);
5285     } else {
5286       _queue->apply_closure_to_all_completed_buffers(&cl);
5287     }
5288 
5289     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5290     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5291     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5292   }
5293 };
5294 
5295 void G1CollectedHeap::redirty_logged_cards() {
5296   guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
5297   double redirty_logged_cards_start = os::elapsedTime();
5298 
5299   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5300                    _g1h->workers()->active_workers() : 1);
5301 
5302   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5303   dirty_card_queue_set().reset_for_par_iteration();
5304   if (use_parallel_gc_threads()) {
5305     set_par_threads(n_workers);
5306     workers()->run_task(&redirty_task);
5307     set_par_threads(0);
5308   } else {
5309     redirty_task.work(0);
5310   }
5311 
5312   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5313   dcq.merge_bufferlists(&dirty_card_queue_set());
5314   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5315 
5316   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5317 }
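
The merge_bufferlists() call above is what makes the preceding assert hold: it
splices the redirtied buffers onto the shared JavaThread queue set and leaves
the source list empty. A standalone sketch of such a constant-time splice
(DemoNode/DemoList are hypothetical; the real buffer lists carry more state):

struct DemoNode { DemoNode* next; };
struct DemoList {
  DemoNode* head; DemoNode* tail;
  DemoList() : head(NULL), tail(NULL) { }
  void merge(DemoList* from) {
    if (from->head == NULL) return;           // nothing to take
    if (head == NULL) { head = from->head; }  // we were empty
    else { tail->next = from->head; }         // splice at our tail
    tail = from->tail;
    from->head = from->tail = NULL;           // source ends up empty
  }
};
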
5318 
5319 // Weak Reference Processing support
5320 
5321 // An always "is_alive" closure that is used to preserve referents.
5322 // If the object is non-null then it's alive.  Used in the preservation
5323 // of referent objects that are pointed to by reference objects
5324 // discovered by the CM ref processor.
5325 class G1AlwaysAliveClosure: public BoolObjectClosure {
5326   G1CollectedHeap* _g1;
5327 public:
5328   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5329   bool do_object_b(oop p) {
5330     if (p != NULL) {