src/share/vm/gc_implementation/g1/concurrentMark.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hs-gc9 Sdiff src/share/vm/gc_implementation/g1

src/share/vm/gc_implementation/g1/concurrentMark.cpp

Print this page




 472 }
 473 
 474 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 475 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 476 #endif // _MSC_VER
 477 
 // Derives the number of concurrent marking threads from the number of
 // parallel GC threads: roughly one quarter (rounded), but never zero.
 478 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 479   return MAX2((n_par_threads + 2) / 4, 1U);
 480 }
 481 
 482 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
 483   _g1h(g1h),
 484   _markBitMap1(log2_intptr(MinObjAlignment)),
 485   _markBitMap2(log2_intptr(MinObjAlignment)),
 486   _parallel_marking_threads(0),
 487   _max_parallel_marking_threads(0),
 488   _sleep_factor(0.0),
 489   _marking_task_overhead(1.0),
 490   _cleanup_sleep_factor(0.0),
 491   _cleanup_task_overhead(1.0),
 492   _cleanup_list("Cleanup List"),
 493   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 494   _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
 495             CardTableModRefBS::card_shift,
 496             false /* in_resource_area*/),
 497 
 498   _prevMarkBitMap(&_markBitMap1),
 499   _nextMarkBitMap(&_markBitMap2),
 500 
 501   _markStack(this),
 502   // _finger set in set_non_marking_state
 503 
 504   _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
 505   // _active_tasks set in set_non_marking_state
 506   // _tasks set inside the constructor
 507   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 508   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 509 
 510   _has_overflown(false),
 511   _concurrent(false),
 512   _has_aborted(false),


1792     if (G1CollectedHeap::use_parallel_gc_threads()) {
1793       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1794                                             worker_id,
1795                                             _n_workers,
1796                                             HeapRegion::FinalCountClaimValue);
1797     } else {
1798       _g1h->heap_region_iterate(&final_update_cl);
1799     }
1800   }
1801 };
1802 
1803 class G1ParNoteEndTask;
1804 
// Per-region closure applied after concurrent marking completes. For each
// region it records marking statistics (regions claimed, max live bytes,
// per-region timing) and hands the region to free_region_if_empty() so
// fully-empty regions can be reclaimed onto the worker-local cleanup list.
1805 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1806   G1CollectedHeap* _g1;
1807   int _worker_num;
1808   size_t _max_live_bytes;
1809   uint _regions_claimed;
1810   size_t _freed_bytes;
1811   FreeRegionList* _local_cleanup_list;
1812   OldRegionSet* _old_proxy_set;
1813   HumongousRegionSet* _humongous_proxy_set;
1814   HRRSCleanupTask* _hrrs_cleanup_task;
1815   double _claimed_region_time;
1816   double _max_region_time;
1817 
1818 public:
1819   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1820                              int worker_num,
1821                              FreeRegionList* local_cleanup_list,
1822                              OldRegionSet* old_proxy_set,
1823                              HumongousRegionSet* humongous_proxy_set,
1824                              HRRSCleanupTask* hrrs_cleanup_task) :
1825     _g1(g1), _worker_num(worker_num),
1826     _max_live_bytes(0), _regions_claimed(0),
1827     _freed_bytes(0),
1828     _claimed_region_time(0.0), _max_region_time(0.0),
1829     _local_cleanup_list(local_cleanup_list),
1830     _old_proxy_set(old_proxy_set),
1831     _humongous_proxy_set(humongous_proxy_set),
1832     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1833 
1834   size_t freed_bytes() { return _freed_bytes; }


1835
  // Processes one heap region; always returns false so that region
  // iteration continues over the whole heap.
1836   bool doHeapRegion(HeapRegion *hr) {
    // Skip continuation regions of humongous objects.
1837     if (hr->continuesHumongous()) {
1838       return false;
1839     }
1840     // We use a claim value of zero here because all regions
1841     // were claimed with value 1 in the FinalCount task.
1842     _g1->reset_gc_time_stamps(hr);
1843     double start = os::elapsedTime();
1844     _regions_claimed++;
1845     hr->note_end_of_marking();
1846     _max_live_bytes += hr->max_live_bytes();
1847     _g1->free_region_if_empty(hr,
1848                               &_freed_bytes,
1849                               _local_cleanup_list,
1850                               _old_proxy_set,
1851                               _humongous_proxy_set,
1852                               _hrrs_cleanup_task,
1853                               true /* par */);









    // Track both total and worst-case time spent on a single region.
1854     double region_time = (os::elapsedTime() - start);
1855     _claimed_region_time += region_time;
1856     if (region_time > _max_region_time) {
1857       _max_region_time = region_time;
1858     }
1859     return false;
1860   }
1861 
1862   size_t max_live_bytes() { return _max_live_bytes; }
1863   uint regions_claimed() { return _regions_claimed; }
1864   double claimed_region_time_sec() { return _claimed_region_time; }
1865   double max_region_time_sec() { return _max_region_time; }
1866 };
1867 
// Parallel gang task that runs G1NoteEndOfConcMarkClosure over the heap's
// regions after concurrent marking. Each worker accumulates results in
// thread-local lists/proxy sets, then merges freed/live byte counts and the
// local cleanup list into shared state under ParGCRareEvent_lock.
1868 class G1ParNoteEndTask: public AbstractGangTask {
1869   friend class G1NoteEndOfConcMarkClosure;
1870 
1871 protected:
1872   G1CollectedHeap* _g1h;
1873   size_t _max_live_bytes;
1874   size_t _freed_bytes;
1875   FreeRegionList* _cleanup_list;
1876 
1877 public:
1878   G1ParNoteEndTask(G1CollectedHeap* g1h,
1879                    FreeRegionList* cleanup_list) :
1880     AbstractGangTask("G1 note end"), _g1h(g1h),
1881     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1882 
  // Entry point for each gang worker; worker_id selects this worker's
  // chunk when iterating regions in parallel.
1883   void work(uint worker_id) {
1884     double start = os::elapsedTime();
1885     FreeRegionList local_cleanup_list("Local Cleanup List");
1886     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
1887     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
1888     HRRSCleanupTask hrrs_cleanup_task;
1889     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
1890                                            &old_proxy_set,
1891                                            &humongous_proxy_set,
1892                                            &hrrs_cleanup_task);
1893     if (G1CollectedHeap::use_parallel_gc_threads()) {
1894       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1895                                             _g1h->workers()->active_workers(),
1896                                             HeapRegion::NoteEndClaimValue);
1897     } else {
1898       _g1h->heap_region_iterate(&g1_note_end);
1899     }
1900     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1901 
1902     // Now update the lists
1903     _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
1904                                             NULL /* free_list */,
1905                                             &old_proxy_set,
1906                                             &humongous_proxy_set,
1907                                             true /* par */);
    // Merge this worker's results into the shared task state; the lock
    // serializes concurrent workers finishing at the same time.
1908     {
1909       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1910       _max_live_bytes += g1_note_end.max_live_bytes();
1911       _freed_bytes += g1_note_end.freed_bytes();
1912 
1913       // If we iterate over the global cleanup list at the end of
1914       // cleanup to do this printing we will not guarantee to only
1915       // generate output for the newly-reclaimed regions (the list
1916       // might not be empty at the beginning of cleanup; we might
1917       // still be working on its previous contents). So we do the
1918       // printing here, before we append the new regions to the global
1919       // cleanup list.
1920 
1921       G1HRPrinter* hr_printer = _g1h->hr_printer();
1922       if (hr_printer->is_active()) {
1923         HeapRegionLinkedListIterator iter(&local_cleanup_list);
1924         while (iter.more_available()) {
1925           HeapRegion* hr = iter.get_next();
1926           hr_printer->cleanup(hr);
1927         }
1928       }
1929 
1930       _cleanup_list->add_as_tail(&local_cleanup_list);
1931       assert(local_cleanup_list.is_empty(), "post-condition");
1932 
1933       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1934     }
1935   }
1936   size_t max_live_bytes() { return _max_live_bytes; }
1937   size_t freed_bytes() { return _freed_bytes; }
1938 };
1939 
1940 class G1ParScrubRemSetTask: public AbstractGangTask {
1941 protected:
1942   G1RemSet* _g1rs;
1943   BitMap* _region_bm;


1954                        HeapRegion::ScrubRemSetClaimValue);
1955     } else {
1956       _g1rs->scrub(_region_bm, _card_bm);
1957     }
1958   }
1959 
1960 };
1961 
1962 void ConcurrentMark::cleanup() {
1963   // world is stopped at this checkpoint
1964   assert(SafepointSynchronize::is_at_safepoint(),
1965          "world should be stopped");
1966   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1967 
1968   // If a full collection has happened, we shouldn't do this.
1969   if (has_aborted()) {
1970     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1971     return;
1972   }
1973 
1974   HRSPhaseSetter x(HRSPhaseCleanup);
1975   g1h->verify_region_sets_optional();
1976 
1977   if (VerifyDuringGC) {
1978     HandleMark hm;  // handle scope
1979     Universe::heap()->prepare_for_verify();
1980     Universe::verify(VerifyOption_G1UsePrevMarking,
1981                      " VerifyDuringGC:(before)");
1982   }
1983 
1984   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1985   g1p->record_concurrent_mark_cleanup_start();
1986 
1987   double start = os::elapsedTime();
1988 
1989   HeapRegionRemSet::reset_for_cleanup_tasks();
1990 
1991   uint n_workers;
1992 
1993   // Do counting once more with the world stopped for good measure.
1994   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);


2127   // We reclaimed old regions so we should calculate the sizes to make
2128   // sure we update the old gen/space data.
2129   g1h->g1mm()->update_sizes();
2130 
2131   if (VerifyDuringGC) {
2132     HandleMark hm;  // handle scope
2133     Universe::heap()->prepare_for_verify();
2134     Universe::verify(VerifyOption_G1UsePrevMarking,
2135                      " VerifyDuringGC:(after)");
2136   }
2137 
2138   g1h->verify_region_sets_optional();
2139   g1h->trace_heap_after_concurrent_cycle();
2140 }
2141 
2142 void ConcurrentMark::completeCleanup() {
2143   if (has_aborted()) return;
2144 
2145   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2146 
2147   _cleanup_list.verify_optional();
2148   FreeRegionList tmp_free_list("Tmp Free List");
2149 
2150   if (G1ConcRegionFreeingVerbose) {
2151     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2152                            "cleanup list has %u entries",
2153                            _cleanup_list.length());
2154   }
2155 
2156   // No one else should be accessing the _cleanup_list at this point,
2157   // so it's not necessary to take any locks
2158   while (!_cleanup_list.is_empty()) {
2159     HeapRegion* hr = _cleanup_list.remove_head();
2160     assert(hr != NULL, "the list was not empty");
2161     hr->par_clear();
2162     tmp_free_list.add_as_tail(hr);
2163 
2164     // Instead of adding one region at a time to the secondary_free_list,
2165     // we accumulate them in the local list and move them a few at a
2166     // time. This also cuts down on the number of notify_all() calls
2167     // we do during this process. We'll also append the local list when
2168     // _cleanup_list is empty (which means we just removed the last




 472 }
 473 
 474 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 475 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 476 #endif // _MSC_VER
 477 
 // Derives the number of concurrent marking threads from the number of
 // parallel GC threads: roughly one quarter (rounded), but never zero.
 478 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 479   return MAX2((n_par_threads + 2) / 4, 1U);
 480 }
 481 
 482 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
 483   _g1h(g1h),
 484   _markBitMap1(log2_intptr(MinObjAlignment)),
 485   _markBitMap2(log2_intptr(MinObjAlignment)),
 486   _parallel_marking_threads(0),
 487   _max_parallel_marking_threads(0),
 488   _sleep_factor(0.0),
 489   _marking_task_overhead(1.0),
 490   _cleanup_sleep_factor(0.0),
 491   _cleanup_task_overhead(1.0),
 492   _cleanup_list("Cleanup List", NULL),
 493   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 494   _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
 495             CardTableModRefBS::card_shift,
 496             false /* in_resource_area*/),
 497 
 498   _prevMarkBitMap(&_markBitMap1),
 499   _nextMarkBitMap(&_markBitMap2),
 500 
 501   _markStack(this),
 502   // _finger set in set_non_marking_state
 503 
 504   _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
 505   // _active_tasks set in set_non_marking_state
 506   // _tasks set inside the constructor
 507   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 508   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 509 
 510   _has_overflown(false),
 511   _concurrent(false),
 512   _has_aborted(false),


1792     if (G1CollectedHeap::use_parallel_gc_threads()) {
1793       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1794                                             worker_id,
1795                                             _n_workers,
1796                                             HeapRegion::FinalCountClaimValue);
1797     } else {
1798       _g1h->heap_region_iterate(&final_update_cl);
1799     }
1800   }
1801 };
1802 
1803 class G1ParNoteEndTask;
1804 
// Per-region closure applied after concurrent marking completes. For each
// region it records marking statistics (regions claimed, max live bytes,
// per-region timing). Regions with used bytes but no marked-live data are
// freed directly onto the worker-local cleanup list, with removals tallied
// in HeapRegionSetCount accumulators; all other regions get their remembered
// sets scheduled for cleanup.
1805 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1806   G1CollectedHeap* _g1;
1807   int _worker_num;
1808   size_t _max_live_bytes;
1809   uint _regions_claimed;
1810   size_t _freed_bytes;
1811   FreeRegionList* _local_cleanup_list;
1812   HeapRegionSetCount _old_regions_removed;
1813   HeapRegionSetCount _humongous_regions_removed;
1814   HRRSCleanupTask* _hrrs_cleanup_task;
1815   double _claimed_region_time;
1816   double _max_region_time;
1817 
1818 public:
1819   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1820                              int worker_num,
1821                              FreeRegionList* local_cleanup_list,


1822                              HRRSCleanupTask* hrrs_cleanup_task) :
1823     _g1(g1), _worker_num(worker_num),
1824     _max_live_bytes(0), _regions_claimed(0),
1825     _freed_bytes(0),
1826     _claimed_region_time(0.0), _max_region_time(0.0),
1827     _local_cleanup_list(local_cleanup_list),
1828     _old_regions_removed(),
1829     _humongous_regions_removed(),
1830     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1831 
1832   size_t freed_bytes() { return _freed_bytes; }
1833   HeapRegionSetCount old_regions_removed() { return _old_regions_removed; }
1834   HeapRegionSetCount humongous_regions_removed() { return _humongous_regions_removed; }
1835 
  // Processes one heap region; always returns false so that region
  // iteration continues over the whole heap.
1836   bool doHeapRegion(HeapRegion *hr) {
    // Skip continuation regions of humongous objects.
1837     if (hr->continuesHumongous()) {
1838       return false;
1839     }
1840     // We use a claim value of zero here because all regions
1841     // were claimed with value 1 in the FinalCount task.
1842     _g1->reset_gc_time_stamps(hr);
1843     double start = os::elapsedTime();
1844     _regions_claimed++;
1845     hr->note_end_of_marking();
1846     _max_live_bytes += hr->max_live_bytes();
1847 
    // A non-young region that is in use but has no live marked data can be
    // reclaimed immediately; humongous and old removals are counted
    // separately (by region count and capacity).
1848     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1849       _freed_bytes += hr->used();
1850       hr->set_containing_set(NULL);
1851       if (hr->isHumongous()) {
1852         assert(hr->startsHumongous(), "we should only see starts humongous");
1853         _humongous_regions_removed.increment(1u, hr->capacity());
1854         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1855       } else {
1856         _old_regions_removed.increment(1u, hr->capacity());
1857         _g1->free_region(hr, _local_cleanup_list, true);
1858       }
1859     } else {
1860       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1861     }
1862 
    // Track both total and worst-case time spent on a single region.
1863     double region_time = (os::elapsedTime() - start);
1864     _claimed_region_time += region_time;
1865     if (region_time > _max_region_time) {
1866       _max_region_time = region_time;
1867     }
1868     return false;
1869   }
1870 
1871   size_t max_live_bytes() { return _max_live_bytes; }
1872   uint regions_claimed() { return _regions_claimed; }
1873   double claimed_region_time_sec() { return _claimed_region_time; }
1874   double max_region_time_sec() { return _max_region_time; }
1875 };
1876 
// Parallel gang task that runs G1NoteEndOfConcMarkClosure over the heap's
// regions after concurrent marking. Each worker accumulates results locally,
// updates the heap's summary bytes and region-set counts, then merges its
// freed/live byte totals and local cleanup list into shared state under
// ParGCRareEvent_lock.
1877 class G1ParNoteEndTask: public AbstractGangTask {
1878   friend class G1NoteEndOfConcMarkClosure;
1879 
1880 protected:
1881   G1CollectedHeap* _g1h;
1882   size_t _max_live_bytes;
1883   size_t _freed_bytes;
1884   FreeRegionList* _cleanup_list;
1885 
1886 public:
1887   G1ParNoteEndTask(G1CollectedHeap* g1h,
1888                    FreeRegionList* cleanup_list) :
1889     AbstractGangTask("G1 note end"), _g1h(g1h),
1890     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1891 
  // Entry point for each gang worker; worker_id selects this worker's
  // chunk when iterating regions in parallel.
1892   void work(uint worker_id) {
1893     double start = os::elapsedTime();
1894     FreeRegionList local_cleanup_list("Local Cleanup List", NULL);


1895     HRRSCleanupTask hrrs_cleanup_task;
1896     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,


1897                                            &hrrs_cleanup_task);
1898     if (G1CollectedHeap::use_parallel_gc_threads()) {
1899       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1900                                             _g1h->workers()->active_workers(),
1901                                             HeapRegion::NoteEndClaimValue);
1902     } else {
1903       _g1h->heap_region_iterate(&g1_note_end);
1904     }
1905     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1906 
1907     // Now update the lists
1908     _g1h->decrement_summary_bytes_mt(g1_note_end.freed_bytes());
1909     _g1h->remove_from_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());



    // Merge this worker's results into the shared task state; the lock
    // serializes concurrent workers finishing at the same time.
1910     {
1911       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1912       _max_live_bytes += g1_note_end.max_live_bytes();
1913       _freed_bytes += g1_note_end.freed_bytes();
1914 
1915       // If we iterate over the global cleanup list at the end of
1916       // cleanup to do this printing we will not guarantee to only
1917       // generate output for the newly-reclaimed regions (the list
1918       // might not be empty at the beginning of cleanup; we might
1919       // still be working on its previous contents). So we do the
1920       // printing here, before we append the new regions to the global
1921       // cleanup list.
1922 
1923       G1HRPrinter* hr_printer = _g1h->hr_printer();
1924       if (hr_printer->is_active()) {
1925         FreeRegionListIterator iter(&local_cleanup_list);
1926         while (iter.more_available()) {
1927           HeapRegion* hr = iter.get_next();
1928           hr_printer->cleanup(hr);
1929         }
1930       }
1931 
1932       _cleanup_list->add_as_tail(&local_cleanup_list);
1933       assert(local_cleanup_list.is_empty(), "post-condition");
1934 
1935       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1936     }
1937   }
1938   size_t max_live_bytes() { return _max_live_bytes; }
1939   size_t freed_bytes() { return _freed_bytes; }
1940 };
1941 
1942 class G1ParScrubRemSetTask: public AbstractGangTask {
1943 protected:
1944   G1RemSet* _g1rs;
1945   BitMap* _region_bm;


1956                        HeapRegion::ScrubRemSetClaimValue);
1957     } else {
1958       _g1rs->scrub(_region_bm, _card_bm);
1959     }
1960   }
1961 
1962 };
1963 
1964 void ConcurrentMark::cleanup() {
1965   // world is stopped at this checkpoint
1966   assert(SafepointSynchronize::is_at_safepoint(),
1967          "world should be stopped");
1968   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1969 
1970   // If a full collection has happened, we shouldn't do this.
1971   if (has_aborted()) {
1972     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1973     return;
1974   }
1975 

1976   g1h->verify_region_sets_optional();
1977 
1978   if (VerifyDuringGC) {
1979     HandleMark hm;  // handle scope
1980     Universe::heap()->prepare_for_verify();
1981     Universe::verify(VerifyOption_G1UsePrevMarking,
1982                      " VerifyDuringGC:(before)");
1983   }
1984 
1985   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1986   g1p->record_concurrent_mark_cleanup_start();
1987 
1988   double start = os::elapsedTime();
1989 
1990   HeapRegionRemSet::reset_for_cleanup_tasks();
1991 
1992   uint n_workers;
1993 
1994   // Do counting once more with the world stopped for good measure.
1995   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);


2128   // We reclaimed old regions so we should calculate the sizes to make
2129   // sure we update the old gen/space data.
2130   g1h->g1mm()->update_sizes();
2131 
2132   if (VerifyDuringGC) {
2133     HandleMark hm;  // handle scope
2134     Universe::heap()->prepare_for_verify();
2135     Universe::verify(VerifyOption_G1UsePrevMarking,
2136                      " VerifyDuringGC:(after)");
2137   }
2138 
2139   g1h->verify_region_sets_optional();
2140   g1h->trace_heap_after_concurrent_cycle();
2141 }
2142 
2143 void ConcurrentMark::completeCleanup() {
2144   if (has_aborted()) return;
2145 
2146   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2147 
2148   _cleanup_list.verify_list();
2149   FreeRegionList tmp_free_list("Tmp Free List", NULL);
2150 
2151   if (G1ConcRegionFreeingVerbose) {
2152     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2153                            "cleanup list has %u entries",
2154                            _cleanup_list.length());
2155   }
2156 
2157   // No one else should be accessing the _cleanup_list at this point,
2158   // so it's not necessary to take any locks
2159   while (!_cleanup_list.is_empty()) {
2160     HeapRegion* hr = _cleanup_list.remove_head();
2161     assert(hr != NULL, "the list was not empty");
2162     hr->par_clear();
2163     tmp_free_list.add_as_tail(hr);
2164 
2165     // Instead of adding one region at a time to the secondary_free_list,
2166     // we accumulate them in the local list and move them a few at a
2167     // time. This also cuts down on the number of notify_all() calls
2168     // we do during this process. We'll also append the local list when
2169     // _cleanup_list is empty (which means we just removed the last


src/share/vm/gc_implementation/g1/concurrentMark.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File