src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp (webrev sdiff for 8047290)

Old:
 823            "Must be in range.");
 824     return _sparse_table.contains_card(hr_ind, card_index);
 825   }
 826 }
 827 
 828 void
 829 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 830   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
 831 }
 832 
 833 // Determines how many threads can add records to an rset in parallel.
 834 // These can be either the mutator threads together with the concurrent
 835 // refinement threads, or the GC worker threads.
 836 uint HeapRegionRemSet::num_par_rem_sets() {
 837   return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
 838 }
 839 
 840 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
 841                                    HeapRegion* hr)
 842   : _bosa(bosa),
 843     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
 844     _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
 845   reset_for_par_iteration();
 846 }
 847 
 848 void HeapRegionRemSet::setup_remset_size() {
 849   // Set up the sparse and fine-grain table sizes.
 850   // table_size = base * (log(region_size / 1M) + 1)
 851   const int LOG_M = 20;
 852   int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
 853   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
 854     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
 855   }
 856   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
 857     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
 858   }
 859   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
 860 }
 861 
 862 bool HeapRegionRemSet::claim_iter() {
 863   if (_iter_state != Unclaimed) return false;

New:
 823            "Must be in range.");
 824     return _sparse_table.contains_card(hr_ind, card_index);
 825   }
 826 }
 827 
 828 void
 829 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 830   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
 831 }
 832 
 833 // Determines how many threads can add records to an rset in parallel.
 834 // These can be either the mutator threads together with the concurrent
 835 // refinement threads, or the GC worker threads.
 836 uint HeapRegionRemSet::num_par_rem_sets() {
 837   return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
 838 }
 839 
 840 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
 841                                    HeapRegion* hr)
 842   : _bosa(bosa),
 843     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
 844     _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
 845   reset_for_par_iteration();
 846 }
 847 
 848 void HeapRegionRemSet::setup_remset_size() {
 849   // Set up the sparse and fine-grain table sizes.
 850   // table_size = base * (log(region_size / 1M) + 1)
 851   const int LOG_M = 20;
 852   int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
 853   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
 854     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
 855   }
 856   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
 857     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
 858   }
 859   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
 860 }
 861 
 862 bool HeapRegionRemSet::claim_iter() {
 863   if (_iter_state != Unclaimed) return false;
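
A note on num_par_rem_sets() above: it sizes the remembered-set data
structures for the worst-case number of concurrent writers, which is either
the mutator-side dirty-card queues plus the concurrent refinement threads,
or the parallel GC worker threads, whichever is larger. A minimal sketch of
that arithmetic, with invented counts (none of these numbers are queried
from a real VM):

  #include <algorithm>
  #include <cstdio>

  int main() {
    // Illustrative stand-ins for the real queries:
    unsigned dcqs_par_ids   = 8;   // DirtyCardQueueSet::num_par_ids()
    unsigned refine_threads = 9;   // ConcurrentG1Refine::thread_num()
    unsigned gc_workers     = 13;  // ParallelGCThreads

    // MAX2(a, b) in HotSpot is simply the larger of the two values.
    unsigned n = std::max(dcqs_par_ids + refine_threads, gc_workers);
    printf("num_par_rem_sets = %u\n", n);  // 17: the concurrent side wins
    return 0;
  }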

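The one functional change in this hunk is at line 843: the per-region
remembered-set lock is now constructed with an explicit
Monitor::_safepoint_check_never argument, recording up front that this
mutex is never acquired with a safepoint check. A minimal, self-contained
sketch of that pattern follows; the enum mirrors HotSpot's
SafepointCheckRequired values, but MockMutex and its methods are invented
here purely for illustration:

  #include <cstdio>

  // Sketch only: not the real HotSpot declarations.
  enum SafepointCheckRequired {
    safepoint_check_never,      // never acquired with a safepoint check
    safepoint_check_sometimes,  // mixed usage is tolerated
    safepoint_check_always      // always acquired with a safepoint check
  };

  struct MockMutex {
    const char* _name;
    SafepointCheckRequired _check;
    MockMutex(const char* name, SafepointCheckRequired check)
      : _name(name), _check(check) {}

    void lock_without_safepoint_check() {
      // The real Mutex can now assert that usage matches the declaration.
      if (_check == safepoint_check_always) {
        printf("bad: %s must be acquired with a safepoint check\n", _name);
      }
    }
  };

  int main() {
    MockMutex m("HeapRegionRemSet lock", safepoint_check_never);
    m.lock_without_safepoint_check();  // consistent with its declaration
    return 0;
  }

Declaring the expectation at construction time lets the lock verify every
acquisition itself, rather than trusting each call site to choose the right
lock() / lock_without_safepoint_check() variant.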

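The table_size formula in setup_remset_size() is easy to check by hand.
The sketch below redoes the arithmetic outside the VM; the base values 4
and 256 are illustrative stand-ins for G1RSetSparseRegionEntriesBase and
G1RSetRegionEntriesBase (assumptions, not values read from g1_globals.hpp),
and LogOfHRGrainBytes = 23 corresponds to 8M regions:

  #include <algorithm>
  #include <cstdio>

  // table_size = base * (log(region_size / 1M) + 1)
  int main() {
    const int LOG_M = 20;            // log2(1M)
    int log_region_bytes = 23;       // assume 8M regions: log2(8M) = 23
    int region_size_log_mb = std::max(log_region_bytes - LOG_M, 0);  // 3

    int sparse_base = 4;    // stand-in for G1RSetSparseRegionEntriesBase
    int fine_base   = 256;  // stand-in for G1RSetRegionEntriesBase

    printf("sparse entries: %d\n", sparse_base * (region_size_log_mb + 1)); // 16
    printf("fine entries:   %d\n", fine_base * (region_size_log_mb + 1));   // 1024
    return 0;
  }

Each doubling of the region size adds one more multiple of the base to both
tables, so capacity grows logarithmically with region size, matching the
comment at line 850.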