  _gc_time_stamp(0),
  _survivor_plab_stats(YoungPLABSize, PLABWeight),
  _old_plab_stats(OldPLABSize, PLABWeight),
  _expand_heap_after_alloc_failure(true),
  _surviving_young_words(NULL),
  _old_marking_cycles_started(0),
  _old_marking_cycles_completed(0),
  _concurrent_cycle_started(false),
  _heap_summary_sent(false),
  _in_cset_fast_test(),
  _dirty_cards_region_list(NULL),
  _worker_cset_start_region(NULL),
  _worker_cset_start_region_time_stamp(NULL),
  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {

  _g1h = this;

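  // Create and initialize the gang of parallel GC worker threads.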
  _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
                                  /* are_GC_task_threads */true,
                                  /* are_ConcurrentGC_threads */false);
  _workers->initialize_workers();

  _allocator = G1Allocator::create_allocator(_g1h);
  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
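  // Objects of at least half a region (GrainWords / 2) are humongous.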

  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
  _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
    ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
  }
  clear_cset_start_regions();

// ...
  // is calculated by subtracting the requested size from the
  // 32Gb boundary and using the result as the base address for
  // heap reservation. If the requested size is not aligned to
  // HeapRegion::GrainBytes (i.e. the alignment that is passed
  // into the ReservedHeapSpace constructor) then the actual
  // base of the reserved heap may end up differing from the
  // address that was requested (i.e. the preferred heap base).
  // If this happens then we could end up using a non-optimal
  // compressed oops mode.
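  // For example, a 4G heap request would give a preferred base of
  // 0x700000000 (32G - 4G).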

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // Create the barrier set for the entire reserved region.
  G1SATBCardTableLoggingModRefBS* bs
    = new G1SATBCardTableLoggingModRefBS(reserved_region());
  bs->initialize();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
  _barrier_set = bs;
  oopDesc::set_bs(bs);

  // Also create a G1 rem set.
  _g1_rem_set = new G1RemSet(this, g1_barrier_set());

  // Carve out the G1 part of the heap.

  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
  G1RegionToSpaceMapper* heap_storage =
    G1RegionToSpaceMapper::create_mapper(g1_rs,
                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
                                         HeapRegion::GrainBytes,
                                         1,
                                         mtJavaHeap);
  heap_storage->set_mapping_changed_listener(&_listener);
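  // Backing storage for the heap is committed and uncommitted in units of
  // HeapRegion::GrainBytes (one region at a time, given the commit factor of 1).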

  // Reserve space for the block offset table. We do not support automatic uncommit
  // for the card table at this time. BOT only.
  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
  G1RegionToSpaceMapper* bot_storage =
    G1RegionToSpaceMapper::create_mapper(bot_rs,

// ...
void G1CollectedHeap::stop() {
  // Stop all concurrent threads. We do this to make sure these threads
  // do not continue to execute and access resources (e.g. gclog_or_tty)
  // that are destroyed during shutdown.
  _cg1r->stop();
  _cmThread->stop();
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::stop();
  }
}

void G1CollectedHeap::clear_humongous_is_live_table() {
  guarantee(G1EagerReclaimHumongousObjects, "Should only be called if true");
  _humongous_is_live.clear();
}

size_t G1CollectedHeap::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
}

void G1CollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}

void G1CollectedHeap::ref_processing_init() {
  // Reference processing in G1 currently works as follows:
  //
  // * There are two reference processor instances. One is
  //   used to record and process discovered references
  //   during concurrent marking; the other is used to
  //   record and process references during STW pauses
  //   (both full and incremental).
  // * Both ref processors need to 'span' the entire heap as
  //   the regions in the collection set may be dotted around.
  //
  // * For the concurrent marking ref processor:
  //   * Reference discovery is enabled at initial marking.
  //   * Reference discovery is disabled and the discovered
  //     references processed etc during remarking.
  //   * Reference discovery is MT (see below).
  //   * Reference discovery requires a barrier (see below).
  //   * Reference processing may or may not be MT
  //     (depending on the value of ParallelRefProcEnabled
  //     and ParallelGCThreads).
  //   * A full GC disables reference discovery by the CM
  //     ref processor and abandons any entries on its
  //     discovered lists.
  //
  // * For the STW processor:
  //   * Non MT discovery is enabled at the start of a full GC.
  //   * Processing and enqueueing during a full GC is non-MT.
  //   * During a full GC, references are processed after marking.
  //
  //   * Discovery (may or may not be MT) is enabled at the start
  //     of an incremental evacuation pause.
  //   * References are processed near the end of a STW evacuation pause.
  //   * For both types of GC:
  //     * Discovery is atomic - i.e. not concurrent.
  //     * Reference discovery will not need a barrier.

  MemRegion mr = reserved_region();

  // Concurrent Mark ref processor
  _ref_processor_cm =
    new ReferenceProcessor(mr,    // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                // mt processing
                           (int) ParallelGCThreads,
                                // degree of mt processing
                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                                // mt discovery
                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
                                // degree of mt discovery
                           false,
                                // Reference discovery is not atomic
                           &_is_alive_closure_cm);
                                // is alive closure
                                // (for efficiency/performance)

  // STW ref processor

// ...
  virtual bool doHeapRegion(HeapRegion* hr) {
    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
    if (_gc_time_stamp != region_gc_time_stamp) {
      gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %u, "
                             "expected %u", HR_FORMAT_PARAMS(hr),
                             region_gc_time_stamp, _gc_time_stamp);
      _failures = true;
    }
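    // false => continue the iteration over the remaining regions.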
    return false;
  }

  bool failures() { return _failures; }
};

void G1CollectedHeap::check_gc_time_stamps() {
  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  heap_region_iterate(&cl);
  guarantee(!cl.failures(), "all GC time stamps should have been reset");
}

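// Debug-only helpers (non-PRODUCT, see the #endif below): track whether the
// VM thread or a GC worker logically holds the Heap_lock for a GC operation.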
void G1CollectedHeap::set_heap_lock_held_for_gc(bool value) {
  _thread_holds_heap_lock_for_gc = value;
}

bool G1CollectedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return Heap_lock->owned_by_self()
      || ((t->is_GC_task_thread() || t->is_VM_thread())
          && _thread_holds_heap_lock_for_gc);
}

#endif // PRODUCT

void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 DirtyCardQueue* into_cset_dcq,
                                                 bool concurrent,
                                                 uint worker_i) {
  // Clean cards in the hot card cache
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);

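  // Apply the closure to the completed buffers in the global dirty card queue set.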
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  size_t n_completed_buffers = 0;
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {

// ...
    return false;
  }
};

void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  IterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}

// Calls a SpaceClosure on a HeapRegion.

class SpaceClosureRegionClosure: public HeapRegionClosure {
  SpaceClosure* _cl;
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    _cl->do_space(r);
    return false;
  }
};

void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  _hrm.iterate(cl);
}

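// The HeapRegionClaimer ensures that each region is claimed and processed by
// exactly one worker during the parallel iteration.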
void
G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
                                         uint worker_id,
                                         HeapRegionClaimer* hrclaimer,
                                         bool concurrent) const {
  _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
}

// Clear the cached CSet starting regions and (more importantly)
// the time stamps. Called when we reset the GC time stamp.
void G1CollectedHeap::clear_cset_start_regions() {
  assert(_worker_cset_start_region != NULL, "sanity");
  assert(_worker_cset_start_region_time_stamp != NULL, "sanity");

  int n_queues = MAX2((int)ParallelGCThreads, 1);