< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page
rev 11044 : 8153507: Improve Card Table Clear Task
Summary: Move the card table clear code into remembered-set-related files. Improve the work distribution of this task, and tune thread usage.
Reviewed-by:


 146       _num_dirtied++;
 147     }
 148 
 149     return true;
 150   }
 151 
 152   size_t num_dirtied()   const { return _num_dirtied; }
 153 };
 154 
 155 
// Invalidate the from-card-cache entries covering the region range
// [start_idx, start_idx + num_regions): once the backing mapping has
// changed, any cached entries for those regions are stale.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
 159 
// Mapping-change callback: invoked when heap regions
// [start_idx, start_idx + num_regions) are (re)committed.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}
 165 
// Add hr to the global dirty-cards region list, unless it is already on
// it. Lock-free; safe to call concurrently from multiple threads.
void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    // CAS NULL -> hr into the region's next field. Exactly one thread
    // wins and performs the actual list insertion below; losers see a
    // non-NULL result and rely on the winner having done the push.
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        // CAS the new head in; retry if another thread changed the
        // head concurrently.
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);
    }
  }
}
 196 
// Remove and return one region from the dirty-cards region list, or
// NULL when the list is empty. Lock-free pop from the list head;
// callers may drain the list concurrently from multiple threads.
HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
{
  HeapRegion* head;
  HeapRegion* hr;
  do {
    head = _dirty_cards_region_list;
    if (head == NULL) {
      return NULL;
    }
    HeapRegion* new_head = head->get_next_dirty_cards_region();
    if (head == new_head) {
      // The last region.
      // (The tail region points to itself — see push_dirty_cards_region.)
      new_head = NULL;
    }
    // CAS the head out; retry if another thread popped it first.
    hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
                                          head);
  } while (hr != head);
  assert(hr != NULL, "invariant");
  hr->set_next_dirty_cards_region(NULL);
  return hr;
}
 218 
 219 // Returns true if the reference points to an object that
 220 // can move in an incremental collection.
 221 bool G1CollectedHeap::is_scavengable(const void* p) {
 222   HeapRegion* hr = heap_region_containing(p);
 223   return !hr->is_pinned();
 224 }
 225 
 226 // Private methods.
 227 
 228 HeapRegion*
 229 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 230   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 231   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 232     if (!_secondary_free_list.is_empty()) {
 233       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
 234                                       "secondary_free_list has %u entries",
 235                                       _secondary_free_list.length());
 236       // It looks as if there are free regions available on the
 237       // secondary_free_list. Let's move them to the free_list and try
 238       // again to allocate from it.


1760   _cg1r(NULL),
1761   _g1mm(NULL),
1762   _refine_cte_cl(NULL),
1763   _preserved_marks_set(true /* in_c_heap */),
1764   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1765   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1766   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1767   _humongous_reclaim_candidates(),
1768   _has_humongous_reclaim_candidates(false),
1769   _archive_allocator(NULL),
1770   _free_regions_coming(false),
1771   _young_list(new YoungList(this)),
1772   _gc_time_stamp(0),
1773   _summary_bytes_used(0),
1774   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1775   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1776   _expand_heap_after_alloc_failure(true),
1777   _old_marking_cycles_started(0),
1778   _old_marking_cycles_completed(0),
1779   _in_cset_fast_test(),
1780   _dirty_cards_region_list(NULL),
1781   _worker_cset_start_region(NULL),
1782   _worker_cset_start_region_time_stamp(NULL),
1783   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1784   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1785 
1786   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1787                           /* are_GC_task_threads */true,
1788                           /* are_ConcurrentGC_threads */false);
1789   _workers->initialize_workers();
1790   _verifier = new G1HeapVerifier(this);
1791 
1792   _allocator = G1Allocator::create_allocator(this);
1793 
1794   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1795 
1796   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1797 
1798   // Override the default _filler_array_max_size so that no humongous filler
1799   // objects are created.
1800   _filler_array_max_size = _humongous_object_threshold_in_words;


4726   if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4727     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4728     _old_set.bulk_remove(old_regions_removed);
4729     _humongous_set.bulk_remove(humongous_regions_removed);
4730   }
4731 
4732 }
4733 
4734 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4735   assert(list != NULL, "list can't be null");
4736   if (!list->is_empty()) {
4737     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4738     _hrm.insert_list_into_free_list(list);
4739   }
4740 }
4741 
// Decrease the recorded number of used bytes by the given amount.
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  decrease_used(bytes);
}
4745 
4746 class G1ParCleanupCTTask : public AbstractGangTask {
4747   G1SATBCardTableModRefBS* _ct_bs;
4748   G1CollectedHeap* _g1h;
4749   HeapRegion* volatile _su_head;
4750 public:
4751   G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
4752                      G1CollectedHeap* g1h) :
4753     AbstractGangTask("G1 Par Cleanup CT Task"),
4754     _ct_bs(ct_bs), _g1h(g1h) { }
4755 
4756   void work(uint worker_id) {
4757     HeapRegion* r;
4758     while (r = _g1h->pop_dirty_cards_region()) {
4759       clear_cards(r);
4760     }
4761   }
4762 
4763   void clear_cards(HeapRegion* r) {
4764     // Cards of the survivors should have already been dirtied.
4765     if (!r->is_survivor()) {
4766       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
4767     }
4768   }
4769 };
4770 
4771 class G1ParScrubRemSetTask: public AbstractGangTask {
4772 protected:
4773   G1RemSet* _g1rs;
4774   HeapRegionClaimer _hrclaimer;
4775 
4776 public:
4777   G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4778     AbstractGangTask("G1 ScrubRS"),
4779     _g1rs(g1_rs),
4780     _hrclaimer(num_workers) {
4781   }
4782 
4783   void work(uint worker_id) {
4784     _g1rs->scrub(worker_id, &_hrclaimer);
4785   }
4786 };
4787 
4788 void G1CollectedHeap::scrub_rem_set() {
4789   uint num_workers = workers()->active_workers();
4790   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4791   workers()->run_task(&g1_par_scrub_rs_task);
4792 }
4793 
// Clear the card table entries for all regions dirtied during the
// collection, and record the elapsed time in the GC phase times.
void G1CollectedHeap::cleanUpCardTable() {
  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  double start = os::elapsedTime();

  {
    // Iterate over the dirty cards region list.
    G1ParCleanupCTTask cleanup_task(ct_bs, this);

    workers()->run_task(&cleanup_task);
#ifndef PRODUCT
    // Need to synchronize with concurrent cleanup since it needs to
    // finish its card table clearing before we can verify.
    wait_while_free_regions_coming();
    _verifier->verify_card_table_cleanup();
#endif
  }

  double elapsed = os::elapsedTime() - start;
  // Phase times are recorded in milliseconds; elapsed is in seconds.
  g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
}
4814 
4815 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4816   size_t pre_used = 0;
4817   FreeRegionList local_free_list("Local List for CSet Freeing");
4818 
4819   double young_time_ms     = 0.0;
4820   double non_young_time_ms = 0.0;
4821 
4822   // Since the collection set is a superset of the the young list,
4823   // all we need to do to clear the young list is clear its
4824   // head and length, and unlink any young regions in the code below
4825   _young_list->clear();
4826 
4827   G1Policy* policy = g1_policy();
4828 
4829   double start_sec = os::elapsedTime();
4830   bool non_young = true;
4831 
4832   HeapRegion* cur = cs_head;




 146       _num_dirtied++;
 147     }
 148 
 149     return true;
 150   }
 151 
 152   size_t num_dirtied()   const { return _num_dirtied; }
 153 };
 154 
 155 
// Invalidate the from-card-cache entries covering the region range
// [start_idx, start_idx + num_regions): once the backing mapping has
// changed, any cached entries for those regions are stale.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
 159 
// Mapping-change callback: invoked when heap regions
// [start_idx, start_idx + num_regions) are (re)committed.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}
 165 





















































 166 // Returns true if the reference points to an object that
 167 // can move in an incremental collection.
 168 bool G1CollectedHeap::is_scavengable(const void* p) {
 169   HeapRegion* hr = heap_region_containing(p);
 170   return !hr->is_pinned();
 171 }
 172 
 173 // Private methods.
 174 
 175 HeapRegion*
 176 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 177   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 178   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 179     if (!_secondary_free_list.is_empty()) {
 180       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
 181                                       "secondary_free_list has %u entries",
 182                                       _secondary_free_list.length());
 183       // It looks as if there are free regions available on the
 184       // secondary_free_list. Let's move them to the free_list and try
 185       // again to allocate from it.


1707   _cg1r(NULL),
1708   _g1mm(NULL),
1709   _refine_cte_cl(NULL),
1710   _preserved_marks_set(true /* in_c_heap */),
1711   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1712   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1713   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1714   _humongous_reclaim_candidates(),
1715   _has_humongous_reclaim_candidates(false),
1716   _archive_allocator(NULL),
1717   _free_regions_coming(false),
1718   _young_list(new YoungList(this)),
1719   _gc_time_stamp(0),
1720   _summary_bytes_used(0),
1721   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1722   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1723   _expand_heap_after_alloc_failure(true),
1724   _old_marking_cycles_started(0),
1725   _old_marking_cycles_completed(0),
1726   _in_cset_fast_test(),

1727   _worker_cset_start_region(NULL),
1728   _worker_cset_start_region_time_stamp(NULL),
1729   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1730   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1731 
1732   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1733                           /* are_GC_task_threads */true,
1734                           /* are_ConcurrentGC_threads */false);
1735   _workers->initialize_workers();
1736   _verifier = new G1HeapVerifier(this);
1737 
1738   _allocator = G1Allocator::create_allocator(this);
1739 
1740   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1741 
1742   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1743 
1744   // Override the default _filler_array_max_size so that no humongous filler
1745   // objects are created.
1746   _filler_array_max_size = _humongous_object_threshold_in_words;


4672   if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4673     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4674     _old_set.bulk_remove(old_regions_removed);
4675     _humongous_set.bulk_remove(humongous_regions_removed);
4676   }
4677 
4678 }
4679 
4680 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4681   assert(list != NULL, "list can't be null");
4682   if (!list->is_empty()) {
4683     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4684     _hrm.insert_list_into_free_list(list);
4685   }
4686 }
4687 
// Decrease the recorded number of used bytes by the given amount.
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  decrease_used(bytes);
}
4691 

























4692 class G1ParScrubRemSetTask: public AbstractGangTask {
4693 protected:
4694   G1RemSet* _g1rs;
4695   HeapRegionClaimer _hrclaimer;
4696 
4697 public:
4698   G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4699     AbstractGangTask("G1 ScrubRS"),
4700     _g1rs(g1_rs),
4701     _hrclaimer(num_workers) {
4702   }
4703 
4704   void work(uint worker_id) {
4705     _g1rs->scrub(worker_id, &_hrclaimer);
4706   }
4707 };
4708 
4709 void G1CollectedHeap::scrub_rem_set() {
4710   uint num_workers = workers()->active_workers();
4711   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4712   workers()->run_task(&g1_par_scrub_rs_task);





















4713 }
4714 
4715 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4716   size_t pre_used = 0;
4717   FreeRegionList local_free_list("Local List for CSet Freeing");
4718 
4719   double young_time_ms     = 0.0;
4720   double non_young_time_ms = 0.0;
4721 
4722   // Since the collection set is a superset of the the young list,
4723   // all we need to do to clear the young list is clear its
4724   // head and length, and unlink any young regions in the code below
4725   _young_list->clear();
4726 
4727   G1Policy* policy = g1_policy();
4728 
4729   double start_sec = os::elapsedTime();
4730   bool non_young = true;
4731 
4732   HeapRegion* cur = cs_head;


< prev index next >