--- old/src/hotspot/share/gc/cms/cmsCardTable.cpp	2018-04-05 22:38:28.290121652 +0200
+++ new/src/hotspot/share/gc/cms/cmsCardTable.cpp	2018-04-05 22:38:28.074121660 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsCardTable.hpp"
 #include "gc/cms/cmsHeap.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/cardTableRS.hpp"
@@ -36,7 +37,24 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 
-void CardTableRS::
+CMSCardTable::CMSCardTable(MemRegion whole_heap, bool scanned_concurrently) :
+  CardTableRS(whole_heap, scanned_concurrently) {
+}
+
+// Returns the number of chunks necessary to cover "mr".
+size_t CMSCardTable::chunks_to_cover(MemRegion mr) {
+  return (size_t)(addr_to_chunk_index(mr.last()) -
+                  addr_to_chunk_index(mr.start()) + 1);
+}
+
+// Returns the index of the chunk in a stride which
+// covers the given address.
+uintptr_t CMSCardTable::addr_to_chunk_index(const void* addr) {
+  uintptr_t card = (uintptr_t) byte_for(addr);
+  return card / ParGCCardsPerStrideChunk;
+}
+
+void CMSCardTable::
 non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                      OopsInGenClosure* cl, CardTableRS* ct,
                                      uint n_threads) {
@@ -82,7 +100,7 @@
 }
 
 void
-CardTableRS::
+CMSCardTable::
 process_stride(Space* sp,
                MemRegion used,
                jint stride, int n_strides,
@@ -162,7 +180,7 @@
 }
 
 void
-CardTableRS::
+CMSCardTable::
 process_chunk_boundaries(Space* sp,
                          DirtyCardToOopClosure* dcto_cl,
                          MemRegion chunk_mr,
@@ -371,7 +389,7 @@
 }
 
 void
-CardTableRS::
+CMSCardTable::
 get_LNC_array_for_space(Space* sp,
                         jbyte**& lowest_non_clean,
                         uintptr_t& lowest_non_clean_base_chunk_index,
@@ -430,3 +448,26 @@
   lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
   lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i];
 }
+
+#ifdef ASSERT
+void CMSCardTable::verify_used_region_at_save_marks(Space* sp) const {
+  MemRegion ur = sp->used_region();
+  MemRegion urasm = sp->used_region_at_save_marks();
+
+  if (!ur.contains(urasm)) {
+    log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
+                    "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
+                    "[" PTR_FORMAT ", " PTR_FORMAT ")",
+                    p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
+    MemRegion ur2 = sp->used_region();
+    MemRegion urasm2 = sp->used_region_at_save_marks();
+    if (!ur.equals(ur2)) {
+      log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
+    }
+    if (!urasm.equals(urasm2)) {
+      log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
+    }
+    ShouldNotReachHere();
+  }
+}
+#endif // ASSERT
--- old/src/hotspot/share/gc/cms/cmsHeap.cpp	2018-04-05 22:38:28.658121640 +0200
+++ new/src/hotspot/share/gc/cms/cmsHeap.cpp	2018-04-05 22:38:28.442121647 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsCardTable.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
@@ -90,6 +91,11 @@
   return JNI_OK;
 }
 
+CardTableRS* CMSHeap::create_rem_set(const MemRegion& reserved_region) {
+  const bool scan_concurrently = CMSPrecleaningEnabled;
+  return new CMSCardTable(reserved_region, scan_concurrently);
+}
+
 void CMSHeap::initialize_serviceability() {
   _young_manager = new GCMemoryManager("ParNew", "end of minor GC");
   _old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");
--- old/src/hotspot/share/gc/cms/cmsHeap.hpp	2018-04-05 22:38:29.006121627 +0200
+++ new/src/hotspot/share/gc/cms/cmsHeap.hpp	2018-04-05 22:38:28.798121635 +0200
@@ -51,6 +51,7 @@
 
   // Returns JNI_OK on success
   virtual jint initialize();
+  virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);
 
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
--- old/src/hotspot/share/gc/shared/cardTableRS.cpp	2018-04-05 22:38:29.350121615 +0200
+++ new/src/hotspot/share/gc/shared/cardTableRS.cpp	2018-04-05 22:38:29.134121623 +0200
@@ -279,36 +279,24 @@
 void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                 OopsInGenClosure* cl,
                                                 uint n_threads) {
+  verify_used_region_at_save_marks(sp);
+
   const MemRegion urasm = sp->used_region_at_save_marks();
+  non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
+}
+
 #ifdef ASSERT
-  // Convert the assertion check to a warning if we are running
-  // CMS+ParNew until related bug is fixed.
+void CardTableRS::verify_used_region_at_save_marks(Space* sp) const {
   MemRegion ur = sp->used_region();
-  assert(ur.contains(urasm) || (UseConcMarkSweepGC),
+  MemRegion urasm = sp->used_region_at_save_marks();
+
+  assert(ur.contains(urasm),
         "Did you forget to call save_marks()? "
         "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
        "[" PTR_FORMAT ", " PTR_FORMAT ")",
        p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
-  // In the case of CMS+ParNew, issue a warning
-  if (!ur.contains(urasm)) {
-    assert(UseConcMarkSweepGC, "Tautology: see assert above");
-    log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
-                    "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
-                    "[" PTR_FORMAT ", " PTR_FORMAT ")",
-                    p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
-    MemRegion ur2 = sp->used_region();
-    MemRegion urasm2 = sp->used_region_at_save_marks();
-    if (!ur.equals(ur2)) {
-      log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
-    }
-    if (!urasm.equals(urasm2)) {
-      log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
-    }
-    ShouldNotReachHere();
-  }
-#endif
-  non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 }
+#endif
 
 void CardTableRS::clear_into_younger(Generation* old_gen) {
   assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
@@ -611,8 +599,8 @@
   CardTable::verify();
 }
 
-CardTableRS::CardTableRS(MemRegion whole_heap) :
-  CardTable(whole_heap, /* scanned concurrently */ UseConcMarkSweepGC && CMSPrecleaningEnabled),
+CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) :
+  CardTable(whole_heap, scanned_concurrently),
   _cur_youngergen_card_val(youngergenP1_card),
   // LNC functionality
   _lowest_non_clean(NULL),
@@ -698,11 +686,7 @@
 {
   if (!mr.is_empty()) {
     if (n_threads > 0) {
-#if INCLUDE_ALL_GCS
       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
-#else // INCLUDE_ALL_GCS
-      fatal("Parallel gc not supported here.");
-#endif // INCLUDE_ALL_GCS
     } else {
       // clear_cl finds contiguous dirty ranges of cards to process and clear.
@@ -717,6 +701,12 @@
   }
 }
 
+void CardTableRS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
+                                                       OopsInGenClosure* cl, CardTableRS* ct,
+                                                       uint n_threads) {
+  fatal("Parallel gc not supported here.");
+}
+
 bool CardTableRS::is_in_young(oop obj) const {
   return GenCollectedHeap::heap()->is_in_young(obj);
 }
--- old/src/hotspot/share/gc/shared/cardTableRS.hpp	2018-04-05 22:38:29.714121603 +0200
+++ new/src/hotspot/share/gc/shared/cardTableRS.hpp	2018-04-05 22:38:29.502121610 +0200
@@ -99,13 +99,15 @@
   jbyte find_unused_youngergenP_card_value();
 
 public:
-  CardTableRS(MemRegion whole_heap);
+  CardTableRS(MemRegion whole_heap, bool scanned_concurrently);
   ~CardTableRS();
 
   CLDRemSet* cld_rem_set() { return &_cld_rem_set; }
 
   void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
 
+  virtual void verify_used_region_at_save_marks(Space* sp) const NOT_DEBUG_RETURN;
+
   // Override.
   void prepare_for_younger_refs_iterate(bool parallel);
 
@@ -174,9 +176,9 @@
 
   // Work method used to implement non_clean_card_iterate_possibly_parallel()
   // above in the parallel case.
-  void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
-                                            OopsInGenClosure* cl, CardTableRS* ct,
-                                            uint n_threads);
+  virtual void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
+                                                    OopsInGenClosure* cl, CardTableRS* ct,
+                                                    uint n_threads);
 
   // This is an array, one element per covered region of the card table.
   // Each entry is itself an array, with one element per chunk in the
@@ -190,53 +192,7 @@
   uintptr_t* _lowest_non_clean_base_chunk_index;
   volatile int* _last_LNC_resizing_collection;
 
-  // Initializes "lowest_non_clean" to point to the array for the region
-  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
-  // index of the corresponding to the first element of that array.
-  // Ensures that these arrays are of sufficient size, allocating if necessary.
-  // May be called by several threads concurrently.
-  void get_LNC_array_for_space(Space* sp,
-                               jbyte**& lowest_non_clean,
-                               uintptr_t& lowest_non_clean_base_chunk_index,
-                               size_t& lowest_non_clean_chunk_size);
-
-  // Returns the number of chunks necessary to cover "mr".
-  size_t chunks_to_cover(MemRegion mr) {
-    return (size_t)(addr_to_chunk_index(mr.last()) -
-                    addr_to_chunk_index(mr.start()) + 1);
-  }
-
-  // Returns the index of the chunk in a stride which
-  // covers the given address.
-  uintptr_t addr_to_chunk_index(const void* addr) {
-    uintptr_t card = (uintptr_t) byte_for(addr);
-    return card / ParGCCardsPerStrideChunk;
-  }
-
-  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
-  // to the cards in the stride (of n_strides) within the given space.
-  void process_stride(Space* sp,
-                      MemRegion used,
-                      jint stride, int n_strides,
-                      OopsInGenClosure* cl,
-                      CardTableRS* ct,
-                      jbyte** lowest_non_clean,
-                      uintptr_t lowest_non_clean_base_chunk_index,
-                      size_t lowest_non_clean_chunk_size);
-
-  // Makes sure that chunk boundaries are handled appropriately, by
-  // adjusting the min_done of dcto_cl, and by using a special card-table
-  // value to indicate how min_done should be set.
-  void process_chunk_boundaries(Space* sp,
-                                DirtyCardToOopClosure* dcto_cl,
-                                MemRegion chunk_mr,
-                                MemRegion used,
-                                jbyte** lowest_non_clean,
-                                uintptr_t lowest_non_clean_base_chunk_index,
-                                size_t lowest_non_clean_chunk_size);
-
   virtual bool is_in_young(oop obj) const;
-
 };
 
 class ClearNoncleanCardWrapper: public MemRegionClosure {
--- old/src/hotspot/share/gc/shared/genCollectedHeap.cpp	2018-04-05 22:38:30.066121590 +0200
+++ new/src/hotspot/share/gc/shared/genCollectedHeap.cpp	2018-04-05 22:38:29.850121598 +0200
@@ -110,7 +110,7 @@
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _rem_set = new CardTableRS(reserved_region());
+  _rem_set = create_rem_set(reserved_region());
   _rem_set->initialize();
   CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
   bs->initialize();
@@ -127,6 +127,10 @@
   return JNI_OK;
 }
 
+CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
+  return new CardTableRS(reserved_region, false /* scan_concurrently */);
+}
+
 void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                               size_t init_promo_size,
                                               size_t init_survivor_size) {
--- old/src/hotspot/share/gc/shared/genCollectedHeap.hpp	2018-04-05 22:38:30.438121578 +0200
+++ new/src/hotspot/share/gc/shared/genCollectedHeap.hpp	2018-04-05 22:38:30.226121585 +0200
@@ -167,6 +167,7 @@
 
   // Returns JNI_OK on success
   virtual jint initialize();
+  virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);
 
   void initialize_size_policy(size_t init_eden_size,
                               size_t init_promo_size,
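The structural change running through all of these hunks is that remembered-set creation now goes through a virtual factory method: GenCollectedHeap::create_rem_set() builds a plain CardTableRS that is never scanned concurrently, and CMSHeap overrides it to build a CMSCardTable whose scanned_concurrently flag follows CMSPrecleaningEnabled, so shared code no longer consults UseConcMarkSweepGC. The standalone C++ sketch below mirrors that shape; the types here are simplified stand-ins for illustration only, not the HotSpot definitions.

#include <cstddef>
#include <cstdio>
#include <memory>

// Simplified stand-ins for the HotSpot types touched by this patch.
struct MemRegion {
  const void* start;
  std::size_t word_size;
};

class CardTableRS {
public:
  CardTableRS(MemRegion whole_heap, bool scanned_concurrently)
    : _whole_heap(whole_heap), _scanned_concurrently(scanned_concurrently) {}
  virtual ~CardTableRS() {}
  virtual const char* name() const { return "CardTableRS"; }
  MemRegion whole_heap() const { return _whole_heap; }
  bool scanned_concurrently() const { return _scanned_concurrently; }
private:
  MemRegion _whole_heap;
  bool _scanned_concurrently;
};

class CMSCardTable : public CardTableRS {
public:
  CMSCardTable(MemRegion whole_heap, bool scanned_concurrently)
    : CardTableRS(whole_heap, scanned_concurrently) {}
  const char* name() const override { return "CMSCardTable"; }
};

// Base heap: the generic remembered set is never scanned concurrently.
class GenCollectedHeap {
public:
  virtual ~GenCollectedHeap() {}
  virtual CardTableRS* create_rem_set(const MemRegion& reserved_region) {
    return new CardTableRS(reserved_region, false /* scan_concurrently */);
  }
};

// CMS heap: the override keeps the concrete card-table type and the
// concurrent-scanning decision inside CMS-specific code.
class CMSHeap : public GenCollectedHeap {
public:
  CardTableRS* create_rem_set(const MemRegion& reserved_region) override {
    const bool scan_concurrently = true;  // stands in for CMSPrecleaningEnabled
    return new CMSCardTable(reserved_region, scan_concurrently);
  }
};

int main() {
  MemRegion reserved = { nullptr, 0 };
  std::unique_ptr<GenCollectedHeap> heaps[2] = {
    std::unique_ptr<GenCollectedHeap>(new GenCollectedHeap()),
    std::unique_ptr<GenCollectedHeap>(new CMSHeap())
  };
  for (auto& heap : heaps) {
    std::unique_ptr<CardTableRS> rs(heap->create_rem_set(reserved));
    std::printf("%s scanned_concurrently=%d\n", rs->name(), (int)rs->scanned_concurrently());
  }
  return 0;
}

This is also why the CardTableRS constructor in the patch now takes scanned_concurrently as an argument instead of computing it from UseConcMarkSweepGC && CMSPrecleaningEnabled.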
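The two helpers moved onto CMSCardTable, addr_to_chunk_index() and chunks_to_cover(), are plain card-to-chunk arithmetic: an address is mapped to its card, the card index is divided by ParGCCardsPerStrideChunk to pick the stride chunk, and a region needs last_chunk - first_chunk + 1 chunks. A minimal standalone sketch of that arithmetic follows; the card size, chunk size, and the card_index_for() helper are made-up stand-ins (the real code derives the card from the card table's byte map via byte_for()).

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Made-up geometry for illustration; the real values come from the card
// table (512-byte cards in HotSpot) and the ParGCCardsPerStrideChunk flag.
static const std::uintptr_t card_size_in_bytes     = 512;
static const std::uintptr_t cards_per_stride_chunk = 256;  // stands in for ParGCCardsPerStrideChunk

// Stand-in for CardTable::byte_for(): index of the card covering an address.
static std::uintptr_t card_index_for(std::uintptr_t addr) {
  return addr / card_size_in_bytes;
}

// Mirrors CMSCardTable::addr_to_chunk_index(): the stride chunk covering addr.
static std::uintptr_t addr_to_chunk_index(std::uintptr_t addr) {
  return card_index_for(addr) / cards_per_stride_chunk;
}

// Mirrors CMSCardTable::chunks_to_cover(): chunks needed to cover [start, last].
static std::size_t chunks_to_cover(std::uintptr_t start, std::uintptr_t last) {
  return (std::size_t)(addr_to_chunk_index(last) - addr_to_chunk_index(start) + 1);
}

int main() {
  const std::uintptr_t start = 0x100000;                  // fake region start
  const std::uintptr_t last  = start + 1024 * 1024 - 1;   // last byte of a 1 MiB region
  // With the numbers above this prints: first chunk = 8, last chunk = 15, chunks to cover = 8
  std::printf("first chunk = %ju, last chunk = %ju, chunks to cover = %zu\n",
              (std::uintmax_t)addr_to_chunk_index(start),
              (std::uintmax_t)addr_to_chunk_index(last),
              chunks_to_cover(start, last));
  return 0;
}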