105 // concurrently, and so we do not care if card_ptr contains references
106 // that point into the collection set.
107 assert(!oops_into_cset, "should be");
108
109 if (_concurrent && SuspendibleThreadSet::should_yield()) {
110 // Caller will actually yield.
111 return false;
112 }
113 // Otherwise, we finished successfully; return true.
114 return true;
115 }
116
117 void set_concurrent(bool b) { _concurrent = b; }
118 };
119
120
121 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
122 private:
123 size_t _num_dirtied;
124 G1CollectedHeap* _g1h;
125 G1SATBCardTableLoggingModRefBS* _g1_bs;
126
127 HeapRegion* region_for_card(jbyte* card_ptr) const {
128 return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
129 }
130
131 bool will_become_free(HeapRegion* hr) const {
132 // A region will be freed by free_collection_set if the region is in the
133 // collection set and has not had an evacuation failure.
134 return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
135 }
136
137 public:
138 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
139 _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
140
141 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
142 HeapRegion* hr = region_for_card(card_ptr);
143
144 // Should only dirty cards in regions that won't be freed.
145 if (!will_become_free(hr)) {
146 *card_ptr = CardTableModRefBS::dirty_card_val();
147 _num_dirtied++;
148 }
149
150 return true;
151 }
152
153 size_t num_dirtied() const { return _num_dirtied; }
154 };
155
156
157 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
158 HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
159 }
160
161 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
162 // The from card cache is not the memory that is actually committed. So we cannot
163 // take advantage of the zero_filled parameter.
164 reset_from_card_cache(start_idx, num_regions);
165 }
166
1059 bool expect_null_mutator_alloc_region) {
1060 assert_at_safepoint(true /* should_be_vm_thread */);
1061 assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
1062 "the current alloc region was unexpectedly found to be non-NULL");
1063
1064 if (!is_humongous(word_size)) {
1065 return _allocator->attempt_allocation_locked(word_size, context);
1066 } else {
1067 HeapWord* result = humongous_obj_allocate(word_size, context);
1068 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1069 collector_state()->set_initiate_conc_mark_if_possible(true);
1070 }
1071 return result;
1072 }
1073
1074 ShouldNotReachHere();
1075 }
1076
1077 class PostMCRemSetClearClosure: public HeapRegionClosure {
1078 G1CollectedHeap* _g1h;
1079 ModRefBarrierSet* _mr_bs;
1080 public:
1081 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1082 _g1h(g1h), _mr_bs(mr_bs) {}
1083
1084 bool doHeapRegion(HeapRegion* r) {
1085 HeapRegionRemSet* hrrs = r->rem_set();
1086
1087 _g1h->reset_gc_time_stamps(r);
1088
1089 if (r->is_continues_humongous()) {
1090       // We'll assert that the strong code root list and RSet are empty
1091 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1092 assert(hrrs->occupied() == 0, "RSet should be empty");
1093 } else {
1094 hrrs->clear();
1095 }
1096 // You might think here that we could clear just the cards
1097 // corresponding to the used region. But no: if we leave a dirty card
1098 // in a region we might allocate into, then it would prevent that card
1099 // from being enqueued, and cause it to be missed.
1100 // Re: the performance cost: we shouldn't be doing full GC anyway!
1101 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1102
1103 return false;
1104 }
1105 };
1106
1107 void G1CollectedHeap::clear_rsets_post_compaction() {
1108 PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1109 heap_region_iterate(&rs_clear);
1110 }
1111
1112 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1113 G1CollectedHeap* _g1h;
1114 UpdateRSOopClosure _cl;
1115 public:
1116 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
1117 _cl(g1->g1_rem_set(), worker_i),
1118 _g1h(g1)
1119 { }
1120
1121 bool doHeapRegion(HeapRegion* r) {
1122 if (!r->is_continues_humongous()) {
1123 _cl.set_from(r);
1124 r->oop_iterate(&_cl);
1125 }
1126 return false;
1127 }
1128 };
1333 workers()->active_workers(),
1334 Threads::number_of_non_daemon_threads());
1335 workers()->update_active_workers(n_workers);
1336   log_info(gc, task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());
1337
1338 ParRebuildRSTask rebuild_rs_task(this);
1339 workers()->run_task(&rebuild_rs_task);
1340
1341 // Rebuild the strong code root lists for each region
1342 rebuild_strong_code_roots();
1343
1344 if (true) { // FIXME
1345 MetaspaceGC::compute_new_size();
1346 }
1347
1348 #ifdef TRACESPINNING
1349 ParallelTaskTerminator::print_termination_counts();
1350 #endif
1351
1352 // Discard all rset updates
1353 JavaThread::dirty_card_queue_set().abandon_logs();
1354 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1355
1356 // At this point there should be no regions in the
1357 // entire heap tagged as young.
1358 assert(check_young_list_empty(), "young list should be empty at this point");
1359
1360 // Update the number of full collections that have been completed.
1361 increment_old_marking_cycles_completed(false /* concurrent */);
1362
1363 _hrm.verify_optional();
1364 _verifier->verify_region_sets_optional();
1365
1366 _verifier->verify_after_gc();
1367
1368 // Clear the previous marking bitmap, if needed for bitmap verification.
1369 // Note we cannot do this when we clear the next marking bitmap in
1370 // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1371 // objects marked during a full GC against the previous bitmap.
1372 // But we need to clear it before calling check_bitmaps below since
1373 // the full GC has compacted objects and updated TAMS but not updated
1815
1816 // Reserve the maximum.
1817
1818 // When compressed oops are enabled, the preferred heap base
1819 // is calculated by subtracting the requested size from the
1820 // 32Gb boundary and using the result as the base address for
1821 // heap reservation. If the requested size is not aligned to
1822 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1823 // into the ReservedHeapSpace constructor) then the actual
1824 // base of the reserved heap may end up differing from the
1825 // address that was requested (i.e. the preferred heap base).
1826 // If this happens then we could end up using a non-optimal
1827 // compressed oops mode.
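  // A worked example of the above (illustrative figures, not from this
  // source): a 20G maximum heap gives a preferred base of 32G - 20G = 12G,
  // which keeps the whole heap below the 32G boundary where compressed
  // oops can stay zero-based. If rounding the request up to GrainBytes
  // shifts the actual base, a less efficient compressed oops mode may be
  // selected instead.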
1828
1829 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1830 heap_alignment);
1831
1832 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1833
1834 // Create the barrier set for the entire reserved region.
1835 G1SATBCardTableLoggingModRefBS* bs
1836 = new G1SATBCardTableLoggingModRefBS(reserved_region());
1837 bs->initialize();
1838 assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
1839 set_barrier_set(bs);
1840
1841 // Create the hot card cache.
1842 _hot_card_cache = new G1HotCardCache(this);
1843
1844 // Also create a G1 rem set.
1845 _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
1846
1847 // Carve out the G1 part of the heap.
1848 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1849 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1850 G1RegionToSpaceMapper* heap_storage =
1851 G1RegionToSpaceMapper::create_mapper(g1_rs,
1852 g1_rs.size(),
1853 page_size,
1854 HeapRegion::GrainBytes,
1855 1,
1856 mtJavaHeap);
1857 os::trace_page_sizes("Heap",
1858 collector_policy()->min_heap_byte_size(),
1859 max_byte_size,
1860 page_size,
1861 heap_rs.base(),
1862 heap_rs.size());
1863 heap_storage->set_mapping_changed_listener(&_listener);
1864
1865 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1866 G1RegionToSpaceMapper* bot_storage =
1867 create_aux_memory_mapper("Block Offset Table",
1868 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1869 G1BlockOffsetTable::heap_map_factor());
1870
1871 ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
1872 G1RegionToSpaceMapper* cardtable_storage =
1873 create_aux_memory_mapper("Card Table",
1874 G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
1875 G1SATBCardTableLoggingModRefBS::heap_map_factor());
1876
1877 G1RegionToSpaceMapper* card_counts_storage =
1878 create_aux_memory_mapper("Card Counts Table",
1879 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1880 G1CardCounts::heap_map_factor());
1881
1882 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1883 G1RegionToSpaceMapper* prev_bitmap_storage =
1884 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1885 G1RegionToSpaceMapper* next_bitmap_storage =
1886 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1887
1888 _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1889 g1_barrier_set()->initialize(cardtable_storage);
1890 // Do later initialization work for concurrent refinement.
1891 _hot_card_cache->initialize(card_counts_storage);
1892
1893 // 6843694 - ensure that the maximum region index can fit
1894 // in the remembered set structures.
1895 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1896 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1897
1898 g1_rem_set()->initialize(max_capacity(), max_regions());
1899
1900 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1901 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1902 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1903 "too many cards per region");
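  // Illustrative arithmetic, assuming RegionIdx_t and CardIdx_t are 16-bit
  // (short) types as in sparsePRT.hpp: both limits evaluate to
  // (1 << 15) - 1 = 32767. A 1M region with 512-byte cards, for example,
  // has 1M / 512 = 2048 cards, well below max_cards_per_region.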
1904
1905 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1906
1907 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1908
1909 {
1916 }
1917
1918 // Create the G1ConcurrentMark data structure and thread.
1919 // (Must do this late, so that "max_regions" is defined.)
1920 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1921 if (_cm == NULL || !_cm->completed_initialization()) {
1922 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1923 return JNI_ENOMEM;
1924 }
1925 _cmThread = _cm->cmThread();
1926
1927 // Now expand into the initial heap size.
1928 if (!expand(init_byte_size, _workers)) {
1929 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1930 return JNI_ENOMEM;
1931 }
1932
1933 // Perform any initialization actions delegated to the policy.
1934 g1_policy()->init(this, &_collection_set);
1935
1936 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1937 SATB_Q_FL_lock,
1938 G1SATBProcessCompletedThreshold,
1939 Shared_SATB_Q_lock);
1940
1941 JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1942 DirtyCardQ_CBL_mon,
1943 DirtyCardQ_FL_lock,
1944 (int)concurrent_g1_refine()->yellow_zone(),
1945 (int)concurrent_g1_refine()->red_zone(),
1946 Shared_DirtyCardQ_lock,
1947 NULL, // fl_owner
1948 true); // init_free_ids
1949
1950 dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1951 DirtyCardQ_CBL_mon,
1952 DirtyCardQ_FL_lock,
1953 -1, // never trigger processing
1954 -1, // no limit on length
1955 Shared_DirtyCardQ_lock,
1956 &JavaThread::dirty_card_queue_set());
1957
1958 // Here we allocate the dummy HeapRegion that is required by the
1959 // G1AllocRegion class.
1960 HeapRegion* dummy_region = _hrm.get_dummy_region();
1961
1962   // We'll re-use the same region whether or not the alloc region
1963   // requires BOT updates. If it doesn't, a non-young region would
1964   // complain that it cannot support allocations without BOT updates,
1965   // so we tag the dummy region as eden to avoid that.
1966 dummy_region->set_eden();
1967 // Make sure it's full.
1968 dummy_region->set_top(dummy_region->end());
1969 G1AllocRegion::setup(this, dummy_region);
1970
1971 _allocator->init_mutator_alloc_region();
1972
1973   // Create the monitoring and management support now so that
1974 // values in the heap have been properly initialized.
1975 _g1mm = new G1MonitoringSupport(this);
1976
1977 G1StringDedup::initialize();
1978
1979 _preserved_marks_set.init(ParallelGCThreads);
1980
1981 _collection_set.initialize(max_regions());
1982
1983 return JNI_OK;
1984 }
1985
1986 void G1CollectedHeap::stop() {
1987 // Stop all concurrent threads. We do this to make sure these threads
1988 // do not continue to execute and access resources (e.g. logging)
1989 // that are destroyed during shutdown.
1990 _cg1r->stop();
1991 _cmThread->stop();
1992 if (G1StringDedup::is_enabled()) {
1993 G1StringDedup::stop();
1994 }
1995 }
1996
1997 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1998 return HeapRegion::max_region_size();
1999 }
2000
2001 void G1CollectedHeap::post_initialize() {
2002 ref_processing_init();
2003 }
2004
2005 void G1CollectedHeap::ref_processing_init() {
2006 // Reference processing in G1 currently works as follows:
2007 //
2008 // * There are two reference processor instances. One is
2009 // used to record and process discovered references
2010 // during concurrent marking; the other is used to
2011 // record and process references during STW pauses
2012 // (both full and incremental).
2013 // * Both ref processors need to 'span' the entire heap as
2014 // the regions in the collection set may be dotted around.
2015 //
2016 // * For the concurrent marking ref processor:
2106 _failures = true;
2107 }
2108 return false;
2109 }
2110
2111 bool failures() { return _failures; }
2112 };
2113
2114 void G1CollectedHeap::check_gc_time_stamps() {
2115 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2116 heap_region_iterate(&cl);
2117 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2118 }
2119 #endif // PRODUCT
2120
2121 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2122 _hot_card_cache->drain(cl, worker_i);
2123 }
2124
2125 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
2126 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2127 size_t n_completed_buffers = 0;
2128 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2129 n_completed_buffers++;
2130 }
2131 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2132 dcqs.clear_n_completed_buffers();
2133 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2134 }
2135
2136 // Computes the sum of the storage used by the various regions.
2137 size_t G1CollectedHeap::used() const {
2138 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2139 if (_archive_allocator != NULL) {
2140 result += _archive_allocator->used();
2141 }
2142 return result;
2143 }
2144
2145 size_t G1CollectedHeap::used_unlocked() const {
2146 return _summary_bytes_used;
2482 void G1CollectedHeap::prepare_for_verify() {
2483 _verifier->prepare_for_verify();
2484 }
2485
2486 void G1CollectedHeap::verify(VerifyOption vo) {
2487 _verifier->verify(vo);
2488 }
2489
2490 bool G1CollectedHeap::supports_concurrent_phase_control() const {
2491 return true;
2492 }
2493
2494 const char* const* G1CollectedHeap::concurrent_phases() const {
2495 return _cmThread->concurrent_phases();
2496 }
2497
2498 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2499 return _cmThread->request_concurrent_phase(phase);
2500 }
2501
2502 class PrintRegionClosure: public HeapRegionClosure {
2503 outputStream* _st;
2504 public:
2505 PrintRegionClosure(outputStream* st) : _st(st) {}
2506 bool doHeapRegion(HeapRegion* r) {
2507 r->print_on(_st);
2508 return false;
2509 }
2510 };
2511
2512 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2513 const HeapRegion* hr,
2514 const VerifyOption vo) const {
2515 switch (vo) {
2516 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2517 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2518 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
2519 default: ShouldNotReachHere();
2520 }
2521 return false; // keep some compilers happy
2760 return result;
2761 }
2762
2763 void
2764 G1CollectedHeap::doConcurrentMark() {
2765 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2766 if (!_cmThread->in_progress()) {
2767 _cmThread->set_started();
2768 CGC_lock->notify();
2769 }
2770 }
2771
2772 size_t G1CollectedHeap::pending_card_num() {
2773 size_t extra_cards = 0;
2774 JavaThread *curr = Threads::first();
2775 while (curr != NULL) {
2776 DirtyCardQueue& dcq = curr->dirty_card_queue();
2777 extra_cards += dcq.size();
2778 curr = curr->next();
2779 }
2780 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2781 size_t buffer_size = dcqs.buffer_size();
2782 size_t buffer_num = dcqs.completed_buffers_num();
2783
2784   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
2785 // in bytes - not the number of 'entries'. We need to convert
2786 // into a number of cards.
2787 return (buffer_size * buffer_num + extra_cards) / oopSize;
2788 }
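// A hypothetical instance of the conversion above: with 2048-byte buffers
// on a 64-bit VM (oopSize == 8), ten completed buffers plus 512 bytes of
// unflushed per-thread queue data yield (2048 * 10 + 512) / 8 = 2624
// pending cards.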
2789
2790 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2791 private:
2792 size_t _total_humongous;
2793 size_t _candidate_humongous;
2794
2795 DirtyCardQueue _dcq;
2796
2797 // We don't nominate objects with many remembered set entries, on
2798 // the assumption that such objects are likely still live.
2799 bool is_remset_small(HeapRegion* region) const {
2800 HeapRegionRemSet* const rset = region->rem_set();
2840 // set entries on other regions. In order to reclaim such an
2841 // object, those remembered sets would need to be cleaned up.
2842 //
2843 // We also treat is_typeArray() objects specially, allowing them
2844 // to be reclaimed even if allocated before the start of
2845 // concurrent mark. For this we rely on mark stack insertion to
2846 // exclude is_typeArray() objects, preventing reclaiming an object
2847 // that is in the mark stack. We also rely on the metadata for
2848   // such objects being built-in, and hence guaranteed to be kept live.
2849 // Frequent allocation and drop of large binary blobs is an
2850 // important use case for eager reclaim, and this special handling
2851 // may reduce needed headroom.
2852
2853 return obj->is_typeArray() && is_remset_small(region);
2854 }
2855
2856 public:
2857 RegisterHumongousWithInCSetFastTestClosure()
2858 : _total_humongous(0),
2859 _candidate_humongous(0),
2860 _dcq(&JavaThread::dirty_card_queue_set()) {
2861 }
2862
2863 virtual bool doHeapRegion(HeapRegion* r) {
2864 if (!r->is_starts_humongous()) {
2865 return false;
2866 }
2867 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2868
2869 bool is_candidate = humongous_region_is_candidate(g1h, r);
2870 uint rindex = r->hrm_index();
2871 g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2872 if (is_candidate) {
2873 _candidate_humongous++;
2874 g1h->register_humongous_region_with_cset(rindex);
2875       // The is_candidate check already filters out humongous objects with large
2876       // remembered sets. If we have a humongous object with only a few remembered
2877       // set entries, we simply flush those entries into the DCQS. That will result
2878       // in their automatic re-evaluation during the following evacuation
2879       // phase.
2880 if (!r->rem_set()->is_empty()) {
2881 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
2882 "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2883 G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
2884 HeapRegionRemSetIterator hrrs(r->rem_set());
2885 size_t card_index;
2886 while (hrrs.has_next(card_index)) {
2887 jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
2888 // The remembered set might contain references to already freed
2889 // regions. Filter out such entries to avoid failing card table
2890 // verification.
2891 if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
2892 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
2893 *card_ptr = CardTableModRefBS::dirty_card_val();
2894 _dcq.enqueue(card_ptr);
2895 }
2896 }
2897 }
2898 assert(hrrs.n_yielded() == r->rem_set()->occupied(),
2899 "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
2900 hrrs.n_yielded(), r->rem_set()->occupied());
2901 r->rem_set()->clear_locked();
2902 }
2903 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2904 }
2905 _total_humongous++;
2906
2907 return false;
2908 }
2909
2910 size_t total_humongous() const { return _total_humongous; }
2911 size_t candidate_humongous() const { return _candidate_humongous; }
2912
2913 void flush_rem_set_entries() { _dcq.flush(); }
3917 _queue(queue), _g1h(g1h) { }
3918
3919 virtual void work(uint worker_id) {
3920 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3921 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3922
3923 RedirtyLoggedCardTableEntryClosure cl(_g1h);
3924 _queue->par_apply_closure_to_all_completed_buffers(&cl);
3925
3926 phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3927 }
3928 };
3929
3930 void G1CollectedHeap::redirty_logged_cards() {
3931 double redirty_logged_cards_start = os::elapsedTime();
3932
3933 G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3934 dirty_card_queue_set().reset_for_par_iteration();
3935 workers()->run_task(&redirty_task);
3936
3937 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
3938 dcq.merge_bufferlists(&dirty_card_queue_set());
3939 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3940
3941 g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3942 }
3943
3944 // Weak Reference Processing support
3945
3946 // An always "is_alive" closure that is used to preserve referents.
3947 // If the object is non-null then it's alive. Used in the preservation
3948 // of referent objects that are pointed to by reference objects
3949 // discovered by the CM ref processor.
3950 class G1AlwaysAliveClosure: public BoolObjectClosure {
3951 G1CollectedHeap* _g1;
3952 public:
3953 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3954 bool do_object_b(oop p) {
3955 if (p != NULL) {
3956 return true;
3957 }
5443 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5444 HeapRegion* hr = _g1h->heap_region_containing(obj);
5445 assert(!hr->is_continues_humongous(),
5446 "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5447 " starting at " HR_FORMAT,
5448 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5449
5450 hr->remove_strong_code_root(_nm);
5451 }
5452 }
5453
5454 public:
5455 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5456 _g1h(g1h), _nm(nm) {}
5457
5458 void do_oop(oop* p) { do_oop_work(p); }
5459 void do_oop(narrowOop* p) { do_oop_work(p); }
5460 };
5461
5462 void G1CollectedHeap::register_nmethod(nmethod* nm) {
5463 CollectedHeap::register_nmethod(nm);
5464
5465 guarantee(nm != NULL, "sanity");
5466 RegisterNMethodOopClosure reg_cl(this, nm);
5467   nm->oops_do(&reg_cl);
5468 }
5469
5470 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
5471 CollectedHeap::unregister_nmethod(nm);
5472
5473 guarantee(nm != NULL, "sanity");
5474 UnregisterNMethodOopClosure reg_cl(this, nm);
5475   nm->oops_do(&reg_cl, true);
5476 }
5477
5478 void G1CollectedHeap::purge_code_root_memory() {
5479 double purge_start = os::elapsedTime();
5480 G1CodeRootSet::purge();
5481 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
5482 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
5483 }
5484
5485 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
5486 G1CollectedHeap* _g1h;
5487
5488 public:
5489 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5490 _g1h(g1h) {}
5491
5492 void do_code_blob(CodeBlob* cb) {
|
105 // concurrently, and so we do not care if card_ptr contains references
106 // that point into the collection set.
107 assert(!oops_into_cset, "should be");
108
109 if (_concurrent && SuspendibleThreadSet::should_yield()) {
110 // Caller will actually yield.
111 return false;
112 }
113 // Otherwise, we finished successfully; return true.
114 return true;
115 }
116
117 void set_concurrent(bool b) { _concurrent = b; }
118 };
119
120
121 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
122 private:
123 size_t _num_dirtied;
124 G1CollectedHeap* _g1h;
125 G1CardTable* _g1_ct;
126
127 HeapRegion* region_for_card(jbyte* card_ptr) const {
128 return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
129 }
130
131 bool will_become_free(HeapRegion* hr) const {
132 // A region will be freed by free_collection_set if the region is in the
133 // collection set and has not had an evacuation failure.
134 return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
135 }
136
137 public:
138 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
139 _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->g1_card_table()) { }
140
141 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
142 HeapRegion* hr = region_for_card(card_ptr);
143
144 // Should only dirty cards in regions that won't be freed.
145 if (!will_become_free(hr)) {
146 *card_ptr = G1CardTable::dirty_card_val();
147 _num_dirtied++;
148 }
149
150 return true;
151 }
152
153 size_t num_dirtied() const { return _num_dirtied; }
154 };
155
156
157 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
158 HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
159 }
160
161 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
162 // The from card cache is not the memory that is actually committed. So we cannot
163 // take advantage of the zero_filled parameter.
164 reset_from_card_cache(start_idx, num_regions);
165 }
166
1059 bool expect_null_mutator_alloc_region) {
1060 assert_at_safepoint(true /* should_be_vm_thread */);
1061 assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
1062 "the current alloc region was unexpectedly found to be non-NULL");
1063
1064 if (!is_humongous(word_size)) {
1065 return _allocator->attempt_allocation_locked(word_size, context);
1066 } else {
1067 HeapWord* result = humongous_obj_allocate(word_size, context);
1068 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1069 collector_state()->set_initiate_conc_mark_if_possible(true);
1070 }
1071 return result;
1072 }
1073
1074 ShouldNotReachHere();
1075 }
1076
1077 class PostMCRemSetClearClosure: public HeapRegionClosure {
1078 G1CollectedHeap* _g1h;
1079 G1CardTable* _ct;
1080 public:
1081 PostMCRemSetClearClosure(G1CollectedHeap* g1h, G1CardTable* ct) :
1082 _g1h(g1h), _ct(ct) {}
1083
1084 bool doHeapRegion(HeapRegion* r) {
1085 HeapRegionRemSet* hrrs = r->rem_set();
1086
1087 _g1h->reset_gc_time_stamps(r);
1088
1089 if (r->is_continues_humongous()) {
1090       // We'll assert that the strong code root list and RSet are empty
1091 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1092 assert(hrrs->occupied() == 0, "RSet should be empty");
1093 } else {
1094 hrrs->clear();
1095 }
1096 // You might think here that we could clear just the cards
1097 // corresponding to the used region. But no: if we leave a dirty card
1098 // in a region we might allocate into, then it would prevent that card
1099 // from being enqueued, and cause it to be missed.
1100 // Re: the performance cost: we shouldn't be doing full GC anyway!
1101 _ct->clear(MemRegion(r->bottom(), r->end()));
1102
1103 return false;
1104 }
1105 };
1106
1107 void G1CollectedHeap::clear_rsets_post_compaction() {
1108 PostMCRemSetClearClosure rs_clear(this, g1_card_table());
1109 heap_region_iterate(&rs_clear);
1110 }
1111
1112 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1113 G1CollectedHeap* _g1h;
1114 UpdateRSOopClosure _cl;
1115 public:
1116 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
1117 _cl(g1->g1_rem_set(), worker_i),
1118 _g1h(g1)
1119 { }
1120
1121 bool doHeapRegion(HeapRegion* r) {
1122 if (!r->is_continues_humongous()) {
1123 _cl.set_from(r);
1124 r->oop_iterate(&_cl);
1125 }
1126 return false;
1127 }
1128 };
1333 workers()->active_workers(),
1334 Threads::number_of_non_daemon_threads());
1335 workers()->update_active_workers(n_workers);
1336   log_info(gc, task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());
1337
1338 ParRebuildRSTask rebuild_rs_task(this);
1339 workers()->run_task(&rebuild_rs_task);
1340
1341 // Rebuild the strong code root lists for each region
1342 rebuild_strong_code_roots();
1343
1344 if (true) { // FIXME
1345 MetaspaceGC::compute_new_size();
1346 }
1347
1348 #ifdef TRACESPINNING
1349 ParallelTaskTerminator::print_termination_counts();
1350 #endif
1351
1352 // Discard all rset updates
1353 G1BarrierSet::dirty_card_queue_set().abandon_logs();
1354 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1355
1356 // At this point there should be no regions in the
1357 // entire heap tagged as young.
1358 assert(check_young_list_empty(), "young list should be empty at this point");
1359
1360 // Update the number of full collections that have been completed.
1361 increment_old_marking_cycles_completed(false /* concurrent */);
1362
1363 _hrm.verify_optional();
1364 _verifier->verify_region_sets_optional();
1365
1366 _verifier->verify_after_gc();
1367
1368 // Clear the previous marking bitmap, if needed for bitmap verification.
1369 // Note we cannot do this when we clear the next marking bitmap in
1370 // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1371 // objects marked during a full GC against the previous bitmap.
1372 // But we need to clear it before calling check_bitmaps below since
1373 // the full GC has compacted objects and updated TAMS but not updated
1815
1816 // Reserve the maximum.
1817
1818 // When compressed oops are enabled, the preferred heap base
1819 // is calculated by subtracting the requested size from the
1820 // 32Gb boundary and using the result as the base address for
1821 // heap reservation. If the requested size is not aligned to
1822 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1823 // into the ReservedHeapSpace constructor) then the actual
1824 // base of the reserved heap may end up differing from the
1825 // address that was requested (i.e. the preferred heap base).
1826 // If this happens then we could end up using a non-optimal
1827 // compressed oops mode.
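  // For instance (illustrative numbers): a 20G request yields a preferred
  // base of 32G - 20G = 12G, so the heap ends below the 32G boundary and
  // compressed oops can remain zero-based; if alignment rounding moves the
  // actual base, a less favorable compressed oops mode may be chosen.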
1828
1829 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1830 heap_alignment);
1831
1832 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1833
1834 // Create the barrier set for the entire reserved region.
1835 G1CardTable* ct = new G1CardTable(reserved_region());
1836 ct->initialize();
1837 G1BarrierSet* bs = new G1BarrierSet(ct);
1838 bs->initialize();
1839 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1840 set_barrier_set(bs);
1841
1842 // Create the hot card cache.
1843 _hot_card_cache = new G1HotCardCache(this);
1844
1845 // Also create a G1 rem set.
1846 _g1_rem_set = new G1RemSet(this, ct, _hot_card_cache);
1847
1848 // Carve out the G1 part of the heap.
1849 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1850 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1851 G1RegionToSpaceMapper* heap_storage =
1852 G1RegionToSpaceMapper::create_mapper(g1_rs,
1853 g1_rs.size(),
1854 page_size,
1855 HeapRegion::GrainBytes,
1856 1,
1857 mtJavaHeap);
1858 os::trace_page_sizes("Heap",
1859 collector_policy()->min_heap_byte_size(),
1860 max_byte_size,
1861 page_size,
1862 heap_rs.base(),
1863 heap_rs.size());
1864 heap_storage->set_mapping_changed_listener(&_listener);
1865
1866 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1867 G1RegionToSpaceMapper* bot_storage =
1868 create_aux_memory_mapper("Block Offset Table",
1869 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1870 G1BlockOffsetTable::heap_map_factor());
1871
1872 ReservedSpace cardtable_rs(G1CardTable::compute_size(g1_rs.size() / HeapWordSize));
1873 G1RegionToSpaceMapper* cardtable_storage =
1874 create_aux_memory_mapper("Card Table",
1875 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1876 G1CardTable::heap_map_factor());
1877
1878 G1RegionToSpaceMapper* card_counts_storage =
1879 create_aux_memory_mapper("Card Counts Table",
1880 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1881 G1CardCounts::heap_map_factor());
1882
1883 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1884 G1RegionToSpaceMapper* prev_bitmap_storage =
1885 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1886 G1RegionToSpaceMapper* next_bitmap_storage =
1887 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1888
1889 _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1890 g1_card_table()->initialize(cardtable_storage);
1891 // Do later initialization work for concurrent refinement.
1892 _hot_card_cache->initialize(card_counts_storage);
1893
1894 // 6843694 - ensure that the maximum region index can fit
1895 // in the remembered set structures.
1896 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1897 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1898
1899 g1_rem_set()->initialize(max_capacity(), max_regions());
1900
1901 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1902 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1903 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1904 "too many cards per region");
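  // Assuming 16-bit RegionIdx_t and CardIdx_t (as in sparsePRT.hpp), both
  // bounds come out to (1 << 15) - 1 = 32767; e.g. a 1M region with
  // 512-byte cards has 2048 cards, comfortably within that limit.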
1905
1906 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1907
1908 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1909
1910 {
1917 }
1918
1919 // Create the G1ConcurrentMark data structure and thread.
1920 // (Must do this late, so that "max_regions" is defined.)
1921 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1922 if (_cm == NULL || !_cm->completed_initialization()) {
1923 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1924 return JNI_ENOMEM;
1925 }
1926 _cmThread = _cm->cmThread();
1927
1928 // Now expand into the initial heap size.
1929 if (!expand(init_byte_size, _workers)) {
1930 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1931 return JNI_ENOMEM;
1932 }
1933
1934 // Perform any initialization actions delegated to the policy.
1935 g1_policy()->init(this, &_collection_set);
1936
1937 G1BarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1938 SATB_Q_FL_lock,
1939 G1SATBProcessCompletedThreshold,
1940 Shared_SATB_Q_lock);
1941
1942 G1BarrierSet::dirty_card_queue_set().initialize(_refine_cte_cl,
1943 DirtyCardQ_CBL_mon,
1944 DirtyCardQ_FL_lock,
1945 (int)concurrent_g1_refine()->yellow_zone(),
1946 (int)concurrent_g1_refine()->red_zone(),
1947 Shared_DirtyCardQ_lock,
1948 NULL, // fl_owner
1949 true); // init_free_ids
1950
1951 dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1952 DirtyCardQ_CBL_mon,
1953 DirtyCardQ_FL_lock,
1954 -1, // never trigger processing
1955 -1, // no limit on length
1956 Shared_DirtyCardQ_lock,
1957 &G1BarrierSet::dirty_card_queue_set());
1958
1959 // Here we allocate the dummy HeapRegion that is required by the
1960 // G1AllocRegion class.
1961 HeapRegion* dummy_region = _hrm.get_dummy_region();
1962
1963   // We'll re-use the same region whether or not the alloc region
1964   // requires BOT updates. If it doesn't, a non-young region would
1965   // complain that it cannot support allocations without BOT updates,
1966   // so we tag the dummy region as eden to avoid that.
1967 dummy_region->set_eden();
1968 // Make sure it's full.
1969 dummy_region->set_top(dummy_region->end());
1970 G1AllocRegion::setup(this, dummy_region);
1971
1972 _allocator->init_mutator_alloc_region();
1973
1974   // Create the monitoring and management support now so that
1975 // values in the heap have been properly initialized.
1976 _g1mm = new G1MonitoringSupport(this);
1977
1978 G1StringDedup::initialize();
1979
1980 _preserved_marks_set.init(ParallelGCThreads);
1981
1982 _collection_set.initialize(max_regions());
1983
1984 return JNI_OK;
1985 }
1986
1987 void G1CollectedHeap::stop() {
1988 // Stop all concurrent threads. We do this to make sure these threads
1989 // do not continue to execute and access resources (e.g. logging)
1990 // that are destroyed during shutdown.
1991 _cg1r->stop();
1992 _cmThread->stop();
1993 if (G1StringDedup::is_enabled()) {
1994 G1StringDedup::stop();
1995 }
1996 }
1997
1998 void G1CollectedHeap::safepoint_synchronize_begin() {
1999 SuspendibleThreadSet::synchronize();
2000 }
2001
2002 void G1CollectedHeap::safepoint_synchronize_end() {
2003 SuspendibleThreadSet::desynchronize();
2004 }
2005
2006 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2007 return HeapRegion::max_region_size();
2008 }
2009
2010 void G1CollectedHeap::post_initialize() {
2011 ref_processing_init();
2012 }
2013
2014 void G1CollectedHeap::ref_processing_init() {
2015 // Reference processing in G1 currently works as follows:
2016 //
2017 // * There are two reference processor instances. One is
2018 // used to record and process discovered references
2019 // during concurrent marking; the other is used to
2020 // record and process references during STW pauses
2021 // (both full and incremental).
2022 // * Both ref processors need to 'span' the entire heap as
2023 // the regions in the collection set may be dotted around.
2024 //
2025 // * For the concurrent marking ref processor:
2115 _failures = true;
2116 }
2117 return false;
2118 }
2119
2120 bool failures() { return _failures; }
2121 };
2122
2123 void G1CollectedHeap::check_gc_time_stamps() {
2124 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2125 heap_region_iterate(&cl);
2126 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2127 }
2128 #endif // PRODUCT
2129
2130 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2131 _hot_card_cache->drain(cl, worker_i);
2132 }
2133
2134 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
2135 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2136 size_t n_completed_buffers = 0;
2137 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2138 n_completed_buffers++;
2139 }
2140 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2141 dcqs.clear_n_completed_buffers();
2142 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2143 }
2144
2145 // Computes the sum of the storage used by the various regions.
2146 size_t G1CollectedHeap::used() const {
2147 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2148 if (_archive_allocator != NULL) {
2149 result += _archive_allocator->used();
2150 }
2151 return result;
2152 }
2153
2154 size_t G1CollectedHeap::used_unlocked() const {
2155 return _summary_bytes_used;
2491 void G1CollectedHeap::prepare_for_verify() {
2492 _verifier->prepare_for_verify();
2493 }
2494
2495 void G1CollectedHeap::verify(VerifyOption vo) {
2496 _verifier->verify(vo);
2497 }
2498
2499 bool G1CollectedHeap::supports_concurrent_phase_control() const {
2500 return true;
2501 }
2502
2503 const char* const* G1CollectedHeap::concurrent_phases() const {
2504 return _cmThread->concurrent_phases();
2505 }
2506
2507 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2508 return _cmThread->request_concurrent_phase(phase);
2509 }
2510
2511 void G1CollectedHeap::verify_nmethod_roots(nmethod* nmethod) {
2512
2513 }
2514
2515 class PrintRegionClosure: public HeapRegionClosure {
2516 outputStream* _st;
2517 public:
2518 PrintRegionClosure(outputStream* st) : _st(st) {}
2519 bool doHeapRegion(HeapRegion* r) {
2520 r->print_on(_st);
2521 return false;
2522 }
2523 };
2524
2525 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2526 const HeapRegion* hr,
2527 const VerifyOption vo) const {
2528 switch (vo) {
2529 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2530 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2531 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
2532 default: ShouldNotReachHere();
2533 }
2534 return false; // keep some compilers happy
2773 return result;
2774 }
2775
2776 void
2777 G1CollectedHeap::doConcurrentMark() {
2778 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2779 if (!_cmThread->in_progress()) {
2780 _cmThread->set_started();
2781 CGC_lock->notify();
2782 }
2783 }
2784
2785 size_t G1CollectedHeap::pending_card_num() {
2786 size_t extra_cards = 0;
2787 JavaThread *curr = Threads::first();
2788 while (curr != NULL) {
2789 DirtyCardQueue& dcq = curr->dirty_card_queue();
2790 extra_cards += dcq.size();
2791 curr = curr->next();
2792 }
2793 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2794 size_t buffer_size = dcqs.buffer_size();
2795 size_t buffer_num = dcqs.completed_buffers_num();
2796
2797   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
2798 // in bytes - not the number of 'entries'. We need to convert
2799 // into a number of cards.
2800 return (buffer_size * buffer_num + extra_cards) / oopSize;
2801 }
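// Example of the conversion (hypothetical sizes): five completed 2048-byte
// buffers plus 1024 bytes of per-thread queue data on a 64-bit VM give
// (2048 * 5 + 1024) / 8 = 1408 pending cards.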
2802
2803 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2804 private:
2805 size_t _total_humongous;
2806 size_t _candidate_humongous;
2807
2808 DirtyCardQueue _dcq;
2809
2810 // We don't nominate objects with many remembered set entries, on
2811 // the assumption that such objects are likely still live.
2812 bool is_remset_small(HeapRegion* region) const {
2813 HeapRegionRemSet* const rset = region->rem_set();
2853 // set entries on other regions. In order to reclaim such an
2854 // object, those remembered sets would need to be cleaned up.
2855 //
2856 // We also treat is_typeArray() objects specially, allowing them
2857 // to be reclaimed even if allocated before the start of
2858 // concurrent mark. For this we rely on mark stack insertion to
2859 // exclude is_typeArray() objects, preventing reclaiming an object
2860 // that is in the mark stack. We also rely on the metadata for
2861   // such objects being built-in, and hence guaranteed to be kept live.
2862 // Frequent allocation and drop of large binary blobs is an
2863 // important use case for eager reclaim, and this special handling
2864 // may reduce needed headroom.
2865
2866 return obj->is_typeArray() && is_remset_small(region);
2867 }
2868
2869 public:
2870 RegisterHumongousWithInCSetFastTestClosure()
2871 : _total_humongous(0),
2872 _candidate_humongous(0),
2873 _dcq(&G1BarrierSet::dirty_card_queue_set()) {
2874 }
2875
2876 virtual bool doHeapRegion(HeapRegion* r) {
2877 if (!r->is_starts_humongous()) {
2878 return false;
2879 }
2880 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2881
2882 bool is_candidate = humongous_region_is_candidate(g1h, r);
2883 uint rindex = r->hrm_index();
2884 g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2885 if (is_candidate) {
2886 _candidate_humongous++;
2887 g1h->register_humongous_region_with_cset(rindex);
2888       // The is_candidate check already filters out humongous objects with large
2889       // remembered sets. If we have a humongous object with only a few remembered
2890       // set entries, we simply flush those entries into the DCQS. That will result
2891       // in their automatic re-evaluation during the following evacuation
2892       // phase.
2893 if (!r->rem_set()->is_empty()) {
2894 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
2895 "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2896 G1CardTable* ct = g1h->g1_card_table();
2897 HeapRegionRemSetIterator hrrs(r->rem_set());
2898 size_t card_index;
2899 while (hrrs.has_next(card_index)) {
2900 jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
2901 // The remembered set might contain references to already freed
2902 // regions. Filter out such entries to avoid failing card table
2903 // verification.
2904 if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
2905 if (*card_ptr != G1CardTable::dirty_card_val()) {
2906 *card_ptr = G1CardTable::dirty_card_val();
2907 _dcq.enqueue(card_ptr);
2908 }
2909 }
2910 }
2911 assert(hrrs.n_yielded() == r->rem_set()->occupied(),
2912 "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
2913 hrrs.n_yielded(), r->rem_set()->occupied());
2914 r->rem_set()->clear_locked();
2915 }
2916 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2917 }
2918 _total_humongous++;
2919
2920 return false;
2921 }
2922
2923 size_t total_humongous() const { return _total_humongous; }
2924 size_t candidate_humongous() const { return _candidate_humongous; }
2925
2926 void flush_rem_set_entries() { _dcq.flush(); }
3930 _queue(queue), _g1h(g1h) { }
3931
3932 virtual void work(uint worker_id) {
3933 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3934 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3935
3936 RedirtyLoggedCardTableEntryClosure cl(_g1h);
3937 _queue->par_apply_closure_to_all_completed_buffers(&cl);
3938
3939 phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3940 }
3941 };
3942
3943 void G1CollectedHeap::redirty_logged_cards() {
3944 double redirty_logged_cards_start = os::elapsedTime();
3945
3946 G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3947 dirty_card_queue_set().reset_for_par_iteration();
3948 workers()->run_task(&redirty_task);
3949
3950 DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3951 dcq.merge_bufferlists(&dirty_card_queue_set());
3952 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3953
3954 g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3955 }
3956
3957 // Weak Reference Processing support
3958
3959 // An always "is_alive" closure that is used to preserve referents.
3960 // If the object is non-null then it's alive. Used in the preservation
3961 // of referent objects that are pointed to by reference objects
3962 // discovered by the CM ref processor.
3963 class G1AlwaysAliveClosure: public BoolObjectClosure {
3964 G1CollectedHeap* _g1;
3965 public:
3966 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3967 bool do_object_b(oop p) {
3968 if (p != NULL) {
3969 return true;
3970 }
5456 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5457 HeapRegion* hr = _g1h->heap_region_containing(obj);
5458 assert(!hr->is_continues_humongous(),
5459 "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5460 " starting at " HR_FORMAT,
5461 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5462
5463 hr->remove_strong_code_root(_nm);
5464 }
5465 }
5466
5467 public:
5468 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5469 _g1h(g1h), _nm(nm) {}
5470
5471 void do_oop(oop* p) { do_oop_work(p); }
5472 void do_oop(narrowOop* p) { do_oop_work(p); }
5473 };
5474
5475 void G1CollectedHeap::register_nmethod(nmethod* nm) {
5476 assert_locked_or_safepoint(CodeCache_lock);
5477 guarantee(nm != NULL, "sanity");
5478 RegisterNMethodOopClosure reg_cl(this, nm);
5479   nm->oops_do(&reg_cl);
5480 }
5481
5482 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
5483 assert_locked_or_safepoint(CodeCache_lock);
5484 guarantee(nm != NULL, "sanity");
5485 UnregisterNMethodOopClosure reg_cl(this, nm);
5486   nm->oops_do(&reg_cl, true);
5487 }
5488
5489 void G1CollectedHeap::purge_code_root_memory() {
5490 double purge_start = os::elapsedTime();
5491 G1CodeRootSet::purge();
5492 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
5493 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
5494 }
5495
5496 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
5497 G1CollectedHeap* _g1h;
5498
5499 public:
5500 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5501 _g1h(g1h) {}
5502
5503 void do_code_blob(CodeBlob* cb) {