--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2018-11-16 14:39:41.949787326 +0100
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2018-11-16 14:39:41.512773858 +0100
@@ -1016,7 +1016,6 @@
   // Make sure we'll choose a new allocation region afterwards.
   _allocator->release_mutator_alloc_region();
   _allocator->abandon_gc_alloc_regions();
-  g1_rem_set()->cleanupHRRS();
 
   // We may have added regions to the current incremental collection
   // set between the last GC or pause and now. We need to clear the
@@ -2968,13 +2967,6 @@
 
         evacuation_info.set_collectionset_regions(collection_set()->region_length());
 
-        // Make sure the remembered sets are up to date. This needs to be
-        // done before register_humongous_regions_with_cset(), because the
-        // remembered sets are used there to choose eager reclaim candidates.
-        // If the remembered sets are not up to date we might miss some
-        // entries that need to be handled.
-        g1_rem_set()->cleanupHRRS();
-
         register_humongous_regions_with_cset();
 
         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2018-11-16 14:39:43.674840491 +0100
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2018-11-16 14:39:43.235826961 +0100
@@ -60,7 +60,6 @@
 
 // Forward declarations
 class HeapRegion;
-class HRRSCleanupTask;
 class GenerationSpec;
 class G1ParScanThreadState;
 class G1ParScanThreadStateSet;
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2018-11-16 14:39:45.342891899 +0100
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2018-11-16 14:39:44.897878184 +0100
@@ -1220,18 +1220,15 @@
     FreeRegionList* _local_cleanup_list;
     uint _old_regions_removed;
     uint _humongous_regions_removed;
-    HRRSCleanupTask* _hrrs_cleanup_task;
 
   public:
     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
-                                 FreeRegionList* local_cleanup_list,
-                                 HRRSCleanupTask* hrrs_cleanup_task) :
+                                 FreeRegionList* local_cleanup_list) :
       _g1h(g1h),
       _freed_bytes(0),
       _local_cleanup_list(local_cleanup_list),
       _old_regions_removed(0),
-      _humongous_regions_removed(0),
-      _hrrs_cleanup_task(hrrs_cleanup_task) { }
+      _humongous_regions_removed(0) { }
 
     size_t freed_bytes() { return _freed_bytes; }
     const uint old_regions_removed() { return _old_regions_removed; }
@@ -1251,8 +1248,6 @@
         hr->clear_cardtable();
         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
-      } else {
-        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
       }
 
       return false;
@@ -1269,16 +1264,11 @@
     _g1h(g1h),
     _cleanup_list(cleanup_list),
     _hrclaimer(n_workers) {
-
-    HeapRegionRemSet::reset_for_cleanup_tasks();
   }
 
   void work(uint worker_id) {
     FreeRegionList local_cleanup_list("Local Cleanup List");
-    HRRSCleanupTask hrrs_cleanup_task;
-    G1ReclaimEmptyRegionsClosure cl(_g1h,
-                                    &local_cleanup_list,
-                                    &hrrs_cleanup_task);
+    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
     assert(cl.is_complete(), "Shouldn't have aborted!");
 
@@ -1290,8 +1280,6 @@
 
       _cleanup_list->add_ordered(&local_cleanup_list);
       assert(local_cleanup_list.is_empty(), "post-condition");
-
-      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
     }
   }
 };
--- old/src/hotspot/share/gc/g1/g1RemSet.cpp 2018-11-16 14:39:47.017943523 +0100
+++ new/src/hotspot/share/gc/g1/g1RemSet.cpp 2018-11-16 14:39:46.582930116 +0100
@@ -512,10 +512,6 @@
   }
 }
 
-void G1RemSet::cleanupHRRS() {
-  HeapRegionRemSet::cleanup();
-}
-
 void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
   update_rem_set(pss, worker_i);
   scan_rem_set(pss, worker_i);;
--- old/src/hotspot/share/gc/g1/g1RemSet.hpp 2018-11-16 14:39:48.675994623 +0100
+++ new/src/hotspot/share/gc/g1/g1RemSet.hpp 2018-11-16 14:39:48.226980784 +0100
@@ -86,11 +86,6 @@
   // Initialize data that depends on the heap size being known.
   void initialize(size_t capacity, uint max_regions);
 
-  // This is called to reset dual hash tables after the gc pause
-  // is finished and the initial hash table is no longer being
-  // scanned.
-  void cleanupHRRS();
-
   G1RemSet(G1CollectedHeap* g1h,
            G1CardTable* ct,
            G1HotCardCache* hot_card_cache);
--- old/src/hotspot/share/gc/g1/heapRegionRemSet.cpp 2018-11-16 14:39:50.325045445 +0100
+++ new/src/hotspot/share/gc/g1/heapRegionRemSet.cpp 2018-11-16 14:39:49.885031884 +0100
@@ -601,11 +601,6 @@
   }
 }
 
-void
-OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
-  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
-}
-
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                    HeapRegion* hr)
   : _bot(bot),
@@ -635,10 +630,6 @@
   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 }
 
-void HeapRegionRemSet::cleanup() {
-  SparsePRT::cleanup_all();
-}
-
 void HeapRegionRemSet::clear(bool only_cardset) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   clear_locked(only_cardset);
@@ -822,18 +813,6 @@
   return false;
 }
 
-void HeapRegionRemSet::reset_for_cleanup_tasks() {
-  SparsePRT::reset_for_cleanup_tasks();
-}
-
-void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
-  _other_regions.do_cleanup_work(hrrs_cleanup_task);
-}
-
-void HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
-  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
-}
-
 #ifndef PRODUCT
 void HeapRegionRemSet::test() {
   os::sleep(Thread::current(), (jlong)5000, false);
--- old/src/hotspot/share/gc/g1/heapRegionRemSet.hpp 2018-11-16 14:39:51.963095928 +0100
+++ new/src/hotspot/share/gc/g1/heapRegionRemSet.hpp 2018-11-16 14:39:51.526082460 +0100
@@ -42,11 +42,6 @@
 class SparsePRT;
 class nmethod;
 
-// Essentially a wrapper around SparsePRTCleanupTask. See
-// sparsePRT.hpp for more details.
-class HRRSCleanupTask : public SparsePRTCleanupTask {
-};
-
 // The "_coarse_map" is a bitmap with one bit for each region, where set
 // bits indicate that the corresponding region may contain some pointer
 // into the owning region.
@@ -159,8 +154,6 @@
 
   // Clear the entire contents of this remembered set.
   void clear();
-
-  void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
 };
 
 class HeapRegionRemSet : public CHeapObj<mtGC> {
@@ -340,9 +333,6 @@
   // consumed by the strong code roots.
   size_t strong_code_roots_mem_size();
 
-  // Called during a stop-world phase to perform any deferred cleanups.
-  static void cleanup();
-
   static void invalidate_from_card_cache(uint start_idx, size_t num_regions) {
     G1FromCardCache::invalidate(start_idx, num_regions);
   }
@@ -351,16 +341,7 @@
   static void print_from_card_cache() {
     G1FromCardCache::print();
   }
-#endif
-
-  // These are wrappers for the similarly-named methods on
-  // SparsePRT. Look at sparsePRT.hpp for more details.
-  static void reset_for_cleanup_tasks();
-  void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
-  static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task);
-
   // Run unit tests.
-#ifndef PRODUCT
   static void test();
 #endif
 };
--- old/src/hotspot/share/gc/g1/sparsePRT.cpp 2018-11-16 14:39:53.603146473 +0100
+++ new/src/hotspot/share/gc/g1/sparsePRT.cpp 2018-11-16 14:39:53.166133005 +0100
@@ -287,165 +287,55 @@
 
 // ----------------------------------------------------------------------
 
-SparsePRT* volatile SparsePRT::_head_expanded_list = NULL;
-
-void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
-  // We could expand multiple times in a pause -- only put on list once.
-  if (sprt->expanded()) return;
-  sprt->set_expanded(true);
-  SparsePRT* hd = _head_expanded_list;
-  while (true) {
-    sprt->_next_expanded = hd;
-    SparsePRT* res = Atomic::cmpxchg(sprt, &_head_expanded_list, hd);
-    if (res == hd) return;
-    else hd = res;
-  }
-}
-
-
-SparsePRT* SparsePRT::get_from_expanded_list() {
-  SparsePRT* hd = _head_expanded_list;
-  while (hd != NULL) {
-    SparsePRT* next = hd->next_expanded();
-    SparsePRT* res = Atomic::cmpxchg(next, &_head_expanded_list, hd);
-    if (res == hd) {
-      hd->set_next_expanded(NULL);
-      return hd;
-    } else {
-      hd = res;
-    }
-  }
-  return NULL;
-}
-
-void SparsePRT::reset_for_cleanup_tasks() {
-  _head_expanded_list = NULL;
-}
-
-void SparsePRT::do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task) {
-  if (should_be_on_expanded_list()) {
-    sprt_cleanup_task->add(this);
-  }
-}
-
-void SparsePRT::finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task) {
-  assert(ParGCRareEvent_lock->owned_by_self(), "pre-condition");
-  SparsePRT* head = sprt_cleanup_task->head();
-  SparsePRT* tail = sprt_cleanup_task->tail();
-  if (head != NULL) {
-    assert(tail != NULL, "if head is not NULL, so should tail");
-
-    tail->set_next_expanded(_head_expanded_list);
-    _head_expanded_list = head;
-  } else {
-    assert(tail == NULL, "if head is NULL, so should tail");
-  }
-}
-
-bool SparsePRT::should_be_on_expanded_list() {
-  if (_expanded) {
-    assert(_cur != _next, "if _expanded is true, cur should be != _next");
-  } else {
-    assert(_cur == _next, "if _expanded is false, cur should be == _next");
-  }
-  return expanded();
-}
-
-void SparsePRT::cleanup_all() {
-  // First clean up all expanded tables so they agree on next and cur.
-  SparsePRT* sprt = get_from_expanded_list();
-  while (sprt != NULL) {
-    sprt->cleanup();
-    sprt = get_from_expanded_list();
-  }
-}
-
-
 SparsePRT::SparsePRT() :
-  _expanded(false), _next_expanded(NULL)
-{
-  _cur = new RSHashTable(InitialCapacity);
-  _next = _cur;
+  _table(new RSHashTable(InitialCapacity)) {
 }
 
 
 SparsePRT::~SparsePRT() {
-  assert(_next != NULL && _cur != NULL, "Inv");
-  if (_cur != _next) { delete _cur; }
-  delete _next;
+  delete _table;
 }
 
 
 size_t SparsePRT::mem_size() const {
   // We ignore "_cur" here, because it either = _next, or else it is
   // on the deleted list.
-  return sizeof(SparsePRT) + _next->mem_size();
+  return sizeof(SparsePRT) + _table->mem_size();
 }
 
 bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
-  if (_next->should_expand()) {
+  if (_table->should_expand()) {
     expand();
   }
-  return _next->add_card(region_id, card_index);
+  return _table->add_card(region_id, card_index);
 }
 
 SparsePRTEntry* SparsePRT::get_entry(RegionIdx_t region_id) {
-  return _next->get_entry(region_id);
+  return _table->get_entry(region_id);
 }
 
 bool SparsePRT::delete_entry(RegionIdx_t region_id) {
-  return _next->delete_entry(region_id);
+  return _table->delete_entry(region_id);
 }
 
 void SparsePRT::clear() {
-  // If they differ, _next is bigger then cur, so next has no chance of
-  // being the initial size.
-  if (_next != _cur) {
-    delete _next;
-  }
-
-  if (_cur->capacity() != InitialCapacity) {
-    delete _cur;
-    _cur = new RSHashTable(InitialCapacity);
+  // If the entry table is not at initial capacity, just create a new one.
+  if (_table->capacity() != InitialCapacity) {
+    delete _table;
+    _table = new RSHashTable(InitialCapacity);
   } else {
-    _cur->clear();
+    _table->clear();
   }
-  _next = _cur;
-  _expanded = false;
-}
-
-void SparsePRT::cleanup() {
-  // Make sure that the current and next tables agree.
-  if (_cur != _next) {
-    delete _cur;
-  }
-  _cur = _next;
-  set_expanded(false);
 }
 
 void SparsePRT::expand() {
-  RSHashTable* last = _next;
-  _next = new RSHashTable(last->capacity() * 2);
+  RSHashTable* last = _table;
+  _table = new RSHashTable(last->capacity() * 2);
   for (size_t i = 0; i < last->num_entries(); i++) {
     SparsePRTEntry* e = last->entry((int)i);
     if (e->valid_entry()) {
-      _next->add_entry(e);
+      _table->add_entry(e);
     }
   }
-  if (last != _cur) {
-    delete last;
-  }
-  add_to_expanded_list(this);
-}
-
-void SparsePRTCleanupTask::add(SparsePRT* sprt) {
-  assert(sprt->should_be_on_expanded_list(), "pre-condition");
-
-  sprt->set_next_expanded(NULL);
-  if (_tail != NULL) {
-    _tail->set_next_expanded(sprt);
-  } else {
-    _head = sprt;
-  }
-  _tail = sprt;
+  delete last;
 }
--- old/src/hotspot/share/gc/g1/sparsePRT.hpp 2018-11-16 14:39:55.258197481 +0100
+++ new/src/hotspot/share/gc/g1/sparsePRT.hpp 2018-11-16 14:39:54.821184012 +0100
@@ -37,12 +37,6 @@
 // indices of other regions to short sequences of cards in the other region
 // that might contain pointers into the owner region.
 
-// These tables only expand while they are accessed in parallel --
-// deletions may be done in single-threaded code. This allows us to allow
-// unsynchronized reads/iterations, as long as expansions caused by
-// insertions only enqueue old versions for deletions, but do not delete
-// old versions synchronously.
-
 class SparsePRTEntry: public CHeapObj<mtGC> {
 private:
   // The type of a card entry.
@@ -220,16 +214,11 @@
 // Concurrent access to a SparsePRT must be serialized by some external mutex.
 
 class SparsePRTIter;
-class SparsePRTCleanupTask;
 
 class SparsePRT {
-  friend class SparsePRTCleanupTask;
+  friend class SparsePRTIter;
 
-  // Iterations are done on the _cur hash table, since they only need to
-  // see entries visible at the start of a collection pause.
-  // All other operations are done using the _next hash table.
-  RSHashTable* _cur;
-  RSHashTable* _next;
+  RSHashTable* _table;
 
   enum SomeAdditionalPrivateConstants {
     InitialCapacity = 16
@@ -237,26 +226,11 @@
 
   void expand();
 
-  bool _expanded;
-
-  bool expanded() { return _expanded; }
-  void set_expanded(bool b) { _expanded = b; }
-
-  SparsePRT* _next_expanded;
-
-  SparsePRT* next_expanded() { return _next_expanded; }
-  void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }
-
-  bool should_be_on_expanded_list();
-
-  static SparsePRT* volatile _head_expanded_list;
-
 public:
   SparsePRT();
-
   ~SparsePRT();
 
-  size_t occupied() const { return _next->occupied_cards(); }
+  size_t occupied() const { return _table->occupied_cards(); }
   size_t mem_size() const;
 
   // Attempts to ensure that the given card_index in the given region is in
@@ -277,72 +251,19 @@
   // Clear the table, and reinitialize to initial capacity.
   void clear();
 
-  // Ensure that "_cur" and "_next" point to the same table.
-  void cleanup();
-
-  // Clean up all tables on the expanded list. Called single threaded.
-  static void cleanup_all();
-  RSHashTable* cur() const { return _cur; }
-
-  static void add_to_expanded_list(SparsePRT* sprt);
-  static SparsePRT* get_from_expanded_list();
-
-  // The purpose of these three methods is to help the GC workers
-  // during the cleanup pause to recreate the expanded list, purging
-  // any tables from it that belong to regions that are freed during
-  // cleanup (if we don't purge those tables, there is a race that
-  // causes various crashes; see CR 7014261).
-  //
-  // We chose to recreate the expanded list, instead of purging
-  // entries from it by iterating over it, to avoid this serial phase
-  // at the end of the cleanup pause.
-  //
-  // The three methods below work as follows:
-  // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
-  //   start of the cleanup pause.
-  // * do_cleanup_work() : Called by the cleanup workers for every
-  //   region that is not free / is being freed by the cleanup
-  //   pause. It creates a list of expanded tables whose head / tail
-  //   are on the thread-local SparsePRTCleanupTask object.
-  // * finish_cleanup_task() : Called by the cleanup workers after
-  //   they complete their cleanup task. It adds the local list into
-  //   the global expanded list. It assumes that the
-  //   ParGCRareEvent_lock is being held to ensure MT-safety.
-  static void reset_for_cleanup_tasks();
-  void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
-  static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);
-
   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
-    return _next->contains_card(region_id, card_index);
+    return _table->contains_card(region_id, card_index);
   }
 };
 
 class SparsePRTIter: public RSHashTableIter {
 public:
   SparsePRTIter(const SparsePRT* sprt) :
-    RSHashTableIter(sprt->cur()) {}
+    RSHashTableIter(sprt->_table) {}
 
   bool has_next(size_t& card_index) {
     return RSHashTableIter::has_next(card_index);
   }
 };
 
-// This allows each worker during a cleanup pause to create a
-// thread-local list of sparse tables that have been expanded and need
-// to be processed at the beginning of the next GC pause. This lists
-// are concatenated into the single expanded list at the end of the
-// cleanup pause.
-class SparsePRTCleanupTask {
-private:
-  SparsePRT* _head;
-  SparsePRT* _tail;
-
-public:
-  SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }
-
-  void add(SparsePRT* sprt);
-  SparsePRT* head() { return _head; }
-  SparsePRT* tail() { return _tail; }
-};
-
 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP