--- old/src/share/vm/gc/shared/cardTableModRefBS.hpp 2017-04-25 16:45:04.031174182 +0200
+++ new/src/share/vm/gc/shared/cardTableModRefBS.hpp 2017-04-25 16:45:03.919174185 +0200
@@ -26,7 +26,8 @@
 #define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
 
 #include "gc/shared/modRefBarrierSet.hpp"
-#include "oops/oop.hpp"
+
+class CardTable;
 
 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
 // enumerate ref fields that have been modified (since the last
@@ -44,292 +45,99 @@
   // Some classes get to look at some private stuff.
   friend class VMStructs;
  protected:
+  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
+  // or INCLUDE_JVMCI is being used
+  bool _defer_initial_card_mark;
+  bool _can_elide_tlab_store_barriers;
+  CardTable* _card_table;
 
-  enum CardValues {
-    clean_card                  = -1,
-    // The mask contains zeros in places for all other values.
-    clean_card_mask             = clean_card - 31,
-
-    dirty_card                  =  0,
-    precleaned_card             =  1,
-    claimed_card                =  2,
-    deferred_card               =  4,
-    last_card                   =  8,
-    CT_MR_BS_last_reserved      = 16
-  };
-
-  // a word's worth (row) of clean card values
-  static const intptr_t clean_card_row = (intptr_t)(-1);
-
-  // The declaration order of these const fields is important; see the
-  // constructor before changing.
-  const MemRegion _whole_heap;       // the region covered by the card table
-  size_t          _guard_index;      // index of very last element in the card
-                                     // table; it is set to a guard value
-                                     // (last_card) and should never be modified
-  size_t          _last_valid_index; // index of the last valid element
-  const size_t    _page_size;        // page size used when mapping _byte_map
-  size_t          _byte_map_size;    // in bytes
-  jbyte*          _byte_map;         // the card marking array
-
-  int _cur_covered_regions;
-  // The covered regions should be in address order.
-  MemRegion* _covered;
-  // The committed regions correspond one-to-one to the covered regions.
-  // They represent the card-table memory that has been committed to service
-  // the corresponding covered region.  It may be that committed region for
-  // one covered region corresponds to a larger region because of page-size
-  // roundings.  Thus, a committed region for one covered region may
-  // actually extend onto the card-table space for the next covered region.
-  MemRegion* _committed;
-
-  // The last card is a guard card, and we commit the page for it so
-  // we can use the card for verification purposes. We make sure we never
-  // uncommit the MemRegion for that page.
-  MemRegion _guard_region;
-
- protected:
-  inline size_t compute_byte_map_size();
-
-  // Finds and return the index of the region, if any, to which the given
-  // region would be contiguous.  If none exists, assign a new region and
-  // returns its index.  Requires that no more than the maximum number of
-  // covered regions defined in the constructor are ever in use.
-  int find_covering_region_by_base(HeapWord* base);
-
-  // Same as above, but finds the region containing the given address
-  // instead of starting at a given base address.
-  int find_covering_region_containing(HeapWord* addr);
-
-  // Resize one of the regions covered by the remembered set.
-  virtual void resize_covered_region(MemRegion new_region);
-
-  // Returns the leftmost end of a committed region corresponding to a
-  // covered region before covered region "ind", or else "NULL" if "ind" is
-  // the first covered region.
-  HeapWord* largest_prev_committed_end(int ind) const;
-
-  // Returns the part of the region mr that doesn't intersect with
-  // any committed region other than self.  Used to prevent uncommitting
-  // regions that are also committed by other regions.  Also protects
-  // against uncommitting the guard region.
-  MemRegion committed_unique_to_self(int self, MemRegion mr) const;
-
-  // Mapping from address to card marking array entry
-  jbyte* byte_for(const void* p) const {
-    assert(_whole_heap.contains(p),
-           "Attempt to access p = " PTR_FORMAT " out of bounds of "
-           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
-           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
-    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
-    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
-           "out of bounds accessor for card marking array");
-    return result;
-  }
-
-  // The card table byte one after the card marking array
-  // entry for argument address. Typically used for higher bounds
-  // for loops iterating through the card table.
-  jbyte* byte_after(const void* p) const {
-    return byte_for(p) + 1;
-  }
-
- protected:
-  // Dirty the bytes corresponding to "mr" (not all of which must be
-  // covered.)
-  void dirty_MemRegion(MemRegion mr);
-
-  // Clear (to clean_card) the bytes entirely contained within "mr" (not
-  // all of which must be covered.)
-  void clear_MemRegion(MemRegion mr);
+  CardTableModRefBS(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);
 
 public:
-  // Constants
-  enum SomePublicConstants {
-    card_shift                  = 9,
-    card_size                   = 1 << card_shift,
-    card_size_in_words          = card_size / sizeof(HeapWord)
-  };
-
-  static int clean_card_val()      { return clean_card; }
-  static int clean_card_mask_val() { return clean_card_mask; }
-  static int dirty_card_val()      { return dirty_card; }
-  static int claimed_card_val()    { return claimed_card; }
-  static int precleaned_card_val() { return precleaned_card; }
-  static int deferred_card_val()   { return deferred_card; }
-
-  virtual void initialize();
-
-  // *** Barrier set functions.
-
-  bool has_write_ref_pre_barrier() { return false; }
-
-  // Initialization utilities; covered_words is the size of the covered region
-  // in, um, words.
-  inline size_t cards_required(size_t covered_words) {
-    // Add one for a guard card, used to detect errors.
-    const size_t words = align_size_up(covered_words, card_size_in_words);
-    return words / card_size_in_words + 1;
-  }
+  CardTableModRefBS(CardTable* card_table);
+  ~CardTableModRefBS();
 
-protected:
+  void initialize();
 
-  CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
-  ~CardTableModRefBS();
+  CardTable* card_table() const { return _card_table; }
 
   // Record a reference update. Note that these versions are precise!
   // The scanning code has to handle the fact that the write barrier may be
   // either precise or imprecise. We make non-virtual inline variants of
   // these functions here for performance.
-  void write_ref_field_work(oop obj, size_t offset, oop newVal);
-  virtual void write_ref_field_work(void* field, oop newVal, bool release);
-public:
-
-  bool has_write_ref_array_opt() { return true; }
-  bool has_write_region_opt() { return true; }
-
-  inline void inline_write_region(MemRegion mr) {
-    dirty_MemRegion(mr);
-  }
-protected:
-  void write_region_work(MemRegion mr) {
-    inline_write_region(mr);
-  }
-public:
-
-  inline void inline_write_ref_array(MemRegion mr) {
-    dirty_MemRegion(mr);
-  }
-protected:
-  void write_ref_array_work(MemRegion mr) {
-    inline_write_ref_array(mr);
-  }
-public:
-
-  bool is_aligned(HeapWord* addr) {
-    return is_card_aligned(addr);
-  }
-
-  // *** Card-table-barrier-specific things.
-
-  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}
+  template <DecoratorSet decorators>
+  void write_ref_field_post(void* field, oop newVal);
 
-  template <class T> inline void inline_write_ref_field(T* field, oop newVal, bool release);
-
-  // These are used by G1, when it uses the card table as a temporary data
-  // structure for card claiming.
-  bool is_card_dirty(size_t card_index) {
-    return _byte_map[card_index] == dirty_card_val();
-  }
-
-  void mark_card_dirty(size_t card_index) {
-    _byte_map[card_index] = dirty_card_val();
-  }
-
-  bool is_card_clean(size_t card_index) {
-    return _byte_map[card_index] == clean_card_val();
-  }
-
-  // Card marking array base (adjusted for heap low boundary)
-  // This would be the 0th element of _byte_map, if the heap started at 0x0.
-  // But since the heap starts at some higher address, this points to somewhere
-  // before the beginning of the actual _byte_map.
-  jbyte* byte_map_base;
-
-  // Return true if "p" is at the start of a card.
-  bool is_card_aligned(HeapWord* p) {
-    jbyte* pcard = byte_for(p);
-    return (addr_for(pcard) == p);
-  }
-
-  HeapWord* align_to_card_boundary(HeapWord* p) {
-    jbyte* pcard = byte_for(p + card_size_in_words - 1);
-    return addr_for(pcard);
-  }
-
-  // The kinds of precision a CardTableModRefBS may offer.
-  enum PrecisionStyle {
-    Precise,
-    ObjHeadPreciseArray
-  };
-
-  // Tells what style of precision this card table offers.
-  PrecisionStyle precision() {
-    return ObjHeadPreciseArray; // Only one supported for now.
-  }
+  virtual void write_region(MemRegion mr);
+  virtual void write_ref_array_region(MemRegion mr);
 
   // ModRefBS functions.
   virtual void invalidate(MemRegion mr);
-  void clear(MemRegion mr);
-  void dirty(MemRegion mr);
-
-  // *** Card-table-RemSet-specific things.
-
-  static uintx ct_max_alignment_constraint();
-
-  // Apply closure "cl" to the dirty cards containing some part of
-  // MemRegion "mr".
-  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
-
-  // Return the MemRegion corresponding to the first maximal run
-  // of dirty cards lying completely within MemRegion mr.
-  // If reset is "true", then sets those card table entries to the given
-  // value.
-  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
-                                         int reset_val);
-
-  // Provide read-only access to the card table array.
-  const jbyte* byte_for_const(const void* p) const {
-    return byte_for(p);
-  }
-  const jbyte* byte_after_const(const void* p) const {
-    return byte_after(p);
-  }
-
-  // Mapping from card marking array entry to address of first word
-  HeapWord* addr_for(const jbyte* p) const {
-    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
-           "out of bounds access to card marking array");
-    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
-    HeapWord* result = (HeapWord*) (delta << card_shift);
-    assert(_whole_heap.contains(result),
-           "Returning result = " PTR_FORMAT " out of bounds of "
-           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
-           p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
-    return result;
-  }
+  // Print a description of the memory for the barrier set
+  virtual void print_on(outputStream* st) const;
 
-  // Mapping from address to card marking array index.
-  size_t index_for(void* p) {
-    assert(_whole_heap.contains(p),
-           "Attempt to access p = " PTR_FORMAT " out of bounds of "
-           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
-           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
-    return byte_for(p) - _byte_map;
-  }
+  // ReduceInitialCardMarks
+  void new_deferred_store_barrier(JavaThread* thread, oop new_obj);
 
-  const jbyte* byte_for_index(const size_t card_index) const {
-    return _byte_map + card_index;
+  // If the CollectedHeap was asked to defer a store barrier above,
+  // this informs it to flush such a deferred store barrier to the
+  // remembered set.
+  void flush_deferred_store_barrier(JavaThread* thread);
+
+  // Can a compiler initialize a new object without store barriers?
+  // This permission only extends from the creation of a new object
+  // via a TLAB up to the first subsequent safepoint. If such permission
+  // is granted for this heap type, the compiler promises to call
+  // defer_store_barrier() below on any slow path allocation of
+  // a new object for which such initializing store barriers will
+  // have been elided. G1, like CMS, allows this, but should be
+  // ready to provide a compensating write barrier as necessary
+  // if that storage came out of a non-young region. The efficiency
+  // of this implementation depends crucially on being able to
+  // answer very efficiently in constant time whether a piece of
+  // storage in the heap comes from a young region or not.
+  // See ReduceInitialCardMarks.
+  virtual bool can_elide_tlab_store_barriers() const { return true; }
+
+  // If a compiler is eliding store barriers for TLAB-allocated objects,
+  // we will be informed of a slow-path allocation by a call
+  // to new_deferred_store_barrier() above. Such a call precedes the
+  // initialization of the object itself, and no post-store-barriers will
+  // be issued. Some heap types require that the barrier strictly follows
+  // the initializing stores. (This is currently implemented by deferring the
+  // barrier until the next slow-path allocation or gc-related safepoint.)
+  // This interface answers whether a particular barrier type needs the card
+  // mark to be thus strictly sequenced after the stores.
+  virtual bool card_mark_must_follow_store() const;
+
+  void on_slowpath_allocation(JavaThread* thread, oop new_obj) {
+    if (_can_elide_tlab_store_barriers) {
+      new_deferred_store_barrier(thread, new_obj);
+    }
   }
+  void on_destroy_thread(JavaThread* thread);
+  void make_parsable(JavaThread* thread);
 
-  // Print a description of the memory for the barrier set
-  virtual void print_on(outputStream* st) const;
-
-  void verify();
-  void verify_guard();
+protected:
+  virtual BarrierSetCodeGen* make_code_gen();
+  virtual C1BarrierSetCodeGen* make_c1_code_gen();
+  virtual C2BarrierSetCodeGen* make_c2_code_gen();
 
-  // val_equals -> it will check that all cards covered by mr equal val
-  // !val_equals -> it will check that all cards covered by mr do not equal val
-  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
-  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
-  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
+public:
+  template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
+  class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
 };
 
 template<>
-struct BarrierSet::GetName<CardTableModRefBS> {
+struct BSTypeToName<CardTableModRefBS> {
   static const BarrierSet::Name value = BarrierSet::CardTableModRef;
 };
 
+template<>
+struct BSNameToType<BarrierSet::CardTableModRef> {
+  typedef CardTableModRefBS type;
+};
+
 #endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
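
Note on the removed accessors: byte_for()/addr_for()/index_for() implement the card
table's address arithmetic, which this patch relocates into the new CardTable class.
The sketch below is a minimal standalone model of that mapping, assuming only the
512-byte cards implied by card_shift = 9; the heap_start value and the main() driver
are illustrative, not HotSpot code:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Card geometry from the removed SomePublicConstants enum.
    static const int       card_shift = 9;
    static const uintptr_t card_size  = uintptr_t(1) << card_shift; // 512 bytes

    int main() {
      const uintptr_t heap_start = 0x700000000ULL;            // illustrative heap base
      const uintptr_t addr = heap_start + 5 * card_size + 40; // some field address

      // index_for(): a heap address maps to a card index by shifting away the
      // low 9 bits, so every 512-byte slice of the heap shares one card entry.
      size_t card_index = (addr - heap_start) >> card_shift;

      // addr_for(): the inverse mapping recovers the first word covered by the
      // card; granularity is per-card, not per-field, hence "imprecise" barriers.
      uintptr_t card_start = heap_start + (uintptr_t(card_index) << card_shift);

      printf("addr 0x%lx -> card %zu, covering [0x%lx, 0x%lx)\n",
             (unsigned long)addr, card_index,
             (unsigned long)card_start, (unsigned long)(card_start + card_size));
      return 0;
    }

The real byte_for() avoids the subtraction: as the removed comment on byte_map_base
explains, the base pointer is biased by the heap's low boundary, so
byte_map_base[uintptr_t(p) >> card_shift] indexes the right entry directly even
though the heap does not start at address zero.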
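Note on the added ReduceInitialCardMarks machinery: per the comment blocks above,
the compiler elides card marks for the initializing stores into a fresh
TLAB-allocated object, and the runtime instead records one deferred card-marking
obligation per thread, flushed at the next slow-path allocation or GC safepoint.
The following toy model sketches that bookkeeping; ToyThread, Region, and
dirty_cards() are invented stand-ins for JavaThread's deferred-card-mark region
and the real card-dirtying code, not the patch's actual implementation:

    #include <cstddef>

    struct Region {
      char* start; size_t size;
      bool empty() const { return size == 0; }
    };

    // Stand-in for dirtying every card covering [r.start, r.start + r.size).
    static void dirty_cards(const Region& r) { /* mark covered cards dirty */ }

    // Stand-in for JavaThread, which carries at most one deferred card mark.
    struct ToyThread { Region deferred = {nullptr, 0}; };

    // Models on_slowpath_allocation()/new_deferred_store_barrier(): the new
    // object's initializing card marks were elided, so record the obligation.
    void on_slowpath_allocation(ToyThread& t, char* new_obj, size_t size) {
      if (!t.deferred.empty()) {
        dirty_cards(t.deferred);      // flush the previous obligation first
      }
      t.deferred = {new_obj, size};   // defer the mark for the new object
    }

    // Models flush_deferred_store_barrier(): by the time this runs (next
    // slow-path allocation or safepoint) the initializing stores are complete,
    // so the card mark strictly follows them, which is exactly what
    // card_mark_must_follow_store() asks for.
    void flush_deferred_store_barrier(ToyThread& t) {
      if (!t.deferred.empty()) {
        dirty_cards(t.deferred);
        t.deferred = {nullptr, 0};
      }
    }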