src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp

rev 6802 : imported patch refactor-heapregionseq
rev 6805 : imported patch commit-uncommit-within-heap
rev 6806 : imported patch mikael-suggestions
rev 6807 : imported patch bengt-suggestions

*** 23,32 ****
--- 23,33 ----
   */
  
  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
  
+ #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  #include "memory/memRegion.hpp"
  #include "runtime/virtualspace.hpp"
  #include "utilities/globalDefinitions.hpp"
  
  // The CollectedHeap type requires subtypes to implement a method
*** 104,113 ****
--- 105,119 ----
    // Same as above, but does not have any of the possible side effects
    // discussed above.
    inline HeapWord* block_start_const(const void* addr) const;
  };
  
+ class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
+  public:
+   virtual void on_commit(uint start_idx, size_t num_regions);
+ };
+ 
  // This implementation of "G1BlockOffsetTable" divides the covered region
  // into "N"-word subregions (where "N" = 2^"LogN". An array with an entry
  // for each such subregion indicates how far back one must go to find the
  // start of the chunk that includes the first word of the subregion.
  //
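For orientation (not part of the patch): the comment above describes a table with one byte per 2^LogN-byte card, where the byte records how far back from the card's first word the enclosing block starts. A minimal sketch of that lookup, using hypothetical names and plain word indices instead of HeapWord*, and covering only the simple single-card case (the real table also encodes multi-card back-skips):

// Illustrative sketch only, not HotSpot code; assumes 64-bit heap words.
#include <cstddef>

const size_t LogN            = 9;                              // 512-byte cards, as in SomePublicConstants
const size_t LogHeapWordSize = 3;                              // assumption: 8-byte HeapWord
const size_t N_words         = 1 << (LogN - LogHeapWordSize);  // 64 words per card

// Given the per-card offset array and a card index, return the word index
// (relative to the covered region) at which the block holding that card's
// first word begins. Only handles offsets that stay within one card.
size_t block_start_word(const unsigned char* offset_array, size_t card_index) {
  size_t card_first_word = card_index * N_words;
  return card_first_word - offset_array[card_index];
}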
*** 123,216 ****
    friend class G1BlockOffsetArray;
    friend class G1BlockOffsetArrayContigSpace;
    friend class VMStructs;
   private:
    // The reserved region covered by the shared array.
    MemRegion _reserved;
  
    // End of the current committed region.
    HeapWord* _end;
  
    // Array for keeping offsets for retrieving object start fast given an
    // address.
-   VirtualSpace _vs;
    u_char* _offset_array;          // byte array keeping backwards offsets
  
-   void check_index(size_t index, const char* msg) const {
-     assert(index < _vs.committed_size(),
-            err_msg("%s - "
-                    "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
-                    msg, index, _vs.committed_size()));
-   }
- 
    void check_offset(size_t offset, const char* msg) const {
      assert(offset <= N_words,
             err_msg("%s - "
                     "offset: " SIZE_FORMAT", N_words: %u",
                     msg, offset, (uint)N_words));
    }
  
    // Bounds checking accessors:
    // For performance these have to devolve to array accesses in product builds.
!   u_char offset_array(size_t index) const {
!     check_index(index, "index out of range");
!     return _offset_array[index];
!   }
  
    void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
  
!   void set_offset_array(size_t index, u_char offset) {
!     check_index(index, "index out of range");
!     check_offset(offset, "offset too large");
      _offset_array[index] = offset;
    }
  
!   void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
!     check_index(index, "index out of range");
!     assert(high >= low, "addresses out of order");
!     check_offset(pointer_delta(high, low), "offset too large");
!     _offset_array[index] = (u_char) pointer_delta(high, low);
!   }
! 
!   void set_offset_array(size_t left, size_t right, u_char offset) {
!     check_index(right, "right index out of range");
!     assert(left <= right, "indexes out of order");
!     size_t num_cards = right - left + 1;
!     if (UseMemSetInBOT) {
!       memset(&_offset_array[left], offset, num_cards);
!     } else {
!       size_t i = left;
!       const size_t end = i + num_cards;
!       for (; i < end; i++) {
!         _offset_array[i] = offset;
!       }
!     }
!   }
  
!   void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
!     check_index(index, "index out of range");
!     assert(high >= low, "addresses out of order");
!     check_offset(pointer_delta(high, low), "offset too large");
!     assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
!   }
  
    bool is_card_boundary(HeapWord* p) const;
  
    // Return the number of slots needed for an offset array
    // that covers mem_region_words words.
!   // We always add an extra slot because if an object
!   // ends on a card boundary we put a 0 in the next
!   // offset array slot, so we want that slot always
!   // to be reserved.
! 
!   size_t compute_size(size_t mem_region_words) {
!     size_t number_of_slots = (mem_region_words / N_words) + 1;
!     return ReservedSpace::page_align_size_up(number_of_slots);
    }
  
-  public:
    enum SomePublicConstants {
      LogN = 9,
      LogN_words = LogN - LogHeapWordSize,
      N_bytes = 1 << LogN,
      N_words = 1 << LogN_words
--- 129,185 ----
    friend class G1BlockOffsetArray;
    friend class G1BlockOffsetArrayContigSpace;
    friend class VMStructs;
   private:
+   G1BlockOffsetSharedArrayMappingChangedListener _listener;
    // The reserved region covered by the shared array.
    MemRegion _reserved;
  
    // End of the current committed region.
    HeapWord* _end;
  
    // Array for keeping offsets for retrieving object start fast given an
    // address.
    u_char* _offset_array;          // byte array keeping backwards offsets
  
    void check_offset(size_t offset, const char* msg) const {
      assert(offset <= N_words,
             err_msg("%s - "
                     "offset: " SIZE_FORMAT", N_words: %u",
                     msg, offset, (uint)N_words));
    }
  
    // Bounds checking accessors:
    // For performance these have to devolve to array accesses in product builds.
!   inline u_char offset_array(size_t index) const;
  
    void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
  
!   void set_offset_array_raw(size_t index, u_char offset) {
      _offset_array[index] = offset;
    }
  
!   inline void set_offset_array(size_t index, u_char offset);
!   inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
! 
!   inline void set_offset_array(size_t left, size_t right, u_char offset);
! 
!   inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
  
    bool is_card_boundary(HeapWord* p) const;
  
+  public:
+ 
    // Return the number of slots needed for an offset array
    // that covers mem_region_words words.
!   static size_t compute_size(size_t mem_region_words) {
!     size_t number_of_slots = (mem_region_words / N_words);
!     return ReservedSpace::allocation_align_size_up(number_of_slots);
    }
  
    enum SomePublicConstants {
      LogN = 9,
      LogN_words = LogN - LogHeapWordSize,
      N_bytes = 1 << LogN,
      N_words = 1 << LogN_words
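As a worked example for the new compute_size above (which drops the old "+ 1" trailing slot and aligns to allocation granularity rather than page size), assuming 512-byte cards and 8-byte heap words:

// Worked example only; the constants mirror SomePublicConstants, the helper is hypothetical.
#include <cassert>
#include <cstddef>

const size_t LogN            = 9;
const size_t LogHeapWordSize = 3;                              // assumption: 64-bit heap words
const size_t N_words         = 1 << (LogN - LogHeapWordSize);  // 64 words per card

// Number of offset-array slots before alignment, as in compute_size's first line.
size_t slots_for(size_t mem_region_words) {
  return mem_region_words / N_words;
}

int main() {
  // A 1 MB region is 131072 words and needs 2048 one-byte slots (one per 512-byte card);
  // the real compute_size then rounds this up with ReservedSpace::allocation_align_size_up.
  assert(slots_for((1 * 1024 * 1024) / 8) == 2048);
  return 0;
}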
*** 220,244 ****
    // "base + init_word_size". In the future, the table may be expanded
    // (see "resize" below) up to the size of "_reserved" (which must be at
    // least "init_word_size".) The contents of the initial table are
    // undefined; it is the responsibility of the constituent
    // G1BlockOffsetTable(s) to initialize cards.
!   G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
! 
!   // Notes a change in the committed size of the region covered by the
!   // table. The "new_word_size" may not be larger than the size of the
!   // reserved region this table covers.
!   void resize(size_t new_word_size);
  
    void set_bottom(HeapWord* new_bottom);
  
    // Return the appropriate index into "_offset_array" for "p".
    inline size_t index_for(const void* p) const;
  
    // Return the address indicating the start of the region corresponding to
    // "index" in "_offset_array".
    inline HeapWord* address_for_index(size_t index) const;
  };
  
  // And here is the G1BlockOffsetTable subtype that uses the array.
  
  class G1BlockOffsetArray: public G1BlockOffsetTable {
--- 189,213 ----
    // "base + init_word_size". In the future, the table may be expanded
    // (see "resize" below) up to the size of "_reserved" (which must be at
    // least "init_word_size".) The contents of the initial table are
    // undefined; it is the responsibility of the constituent
    // G1BlockOffsetTable(s) to initialize cards.
!   G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
  
    void set_bottom(HeapWord* new_bottom);
  
    // Return the appropriate index into "_offset_array" for "p".
    inline size_t index_for(const void* p) const;
+   inline size_t index_for_raw(const void* p) const;
  
    // Return the address indicating the start of the region corresponding to
    // "index" in "_offset_array".
    inline HeapWord* address_for_index(size_t index) const;
+   // Variant of address_for_index that does not check the index for validity.
+   inline HeapWord* address_for_index_raw(size_t index) const {
+     return _reserved.start() + (index << LogN_words);
+   }
  };
  
  // And here is the G1BlockOffsetTable subtype that uses the array.
  
  class G1BlockOffsetArray: public G1BlockOffsetTable {
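The added index_for_raw/address_for_index_raw pair makes the card-to-address mapping explicit. A self-contained round-trip of the same arithmetic, using uintptr_t word indices in place of HeapWord* (base address and index chosen arbitrarily):

// Standalone illustration of the mapping; not the HotSpot declarations themselves.
#include <cassert>
#include <cstdint>

const uintptr_t LogN            = 9;
const uintptr_t LogHeapWordSize = 3;                     // assumption: 64-bit heap words
const uintptr_t LogN_words      = LogN - LogHeapWordSize;

// index_for_raw: card index of a word address relative to the reserved base.
uintptr_t index_for_raw(uintptr_t reserved_start_word, uintptr_t addr_word) {
  return (addr_word - reserved_start_word) >> LogN_words;
}

// address_for_index_raw: first word covered by a card, as in the patch body above.
uintptr_t address_for_index_raw(uintptr_t reserved_start_word, uintptr_t index) {
  return reserved_start_word + (index << LogN_words);
}

int main() {
  uintptr_t base = 0x100000;                             // arbitrary word-indexed heap start
  uintptr_t card = index_for_raw(base, base + 1000);
  assert(card == (1000 >> LogN_words));                  // 1000 / 64 == 15
  assert(address_for_index_raw(base, card) == base + (card << LogN_words));
  return 0;
}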
*** 474,495 ****
    void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
      alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                        blk_start, blk_end);
    }
  
    // Zero out the entry for _bottom (offset will be zero).
    void zero_bottom_entry();
   public:
    G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
  
    // Initialize the threshold to reflect the first boundary after the
    // bottom of the covered region.
    HeapWord* initialize_threshold();
  
    void reset_bot() {
!     zero_bottom_entry();
!     initialize_threshold();
    }
  
    // Return the next threshold, the point at which the table should be
    // updated.
    HeapWord* threshold() const { return _next_offset_threshold; }
--- 443,470 ----
    void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
      alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                        blk_start, blk_end);
    }
  
+   // Variant of zero_bottom_entry that does not check for availability of the
+   // memory first.
+   void zero_bottom_entry_raw();
+   // Variant of initialize_threshold that does not check for availability of the
+   // memory first.
+   HeapWord* initialize_threshold_raw();
    // Zero out the entry for _bottom (offset will be zero).
    void zero_bottom_entry();
   public:
    G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
  
    // Initialize the threshold to reflect the first boundary after the
    // bottom of the covered region.
    HeapWord* initialize_threshold();
  
    void reset_bot() {
!     zero_bottom_entry_raw();
!     initialize_threshold_raw();
    }
  
    // Return the next threshold, the point at which the table should be
    // updated.
    HeapWord* threshold() const { return _next_offset_threshold; }
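The pattern introduced here, a checked entry point delegating to a _raw variant that skips the "is the backing memory available" check, lets reset_bot run while a region's storage is still being (re)committed. A small sketch of that split with a hypothetical class and flag (not the HotSpot types):

// Hedged sketch of the checked/raw split; every name here is a stand-in.
#include <cassert>

class BotLikeTable {
  unsigned char _bottom_entry;
  bool _committed;                     // assumption: whether backing memory is usable

  void zero_bottom_entry_raw() {       // no availability check, usable during commit
    _bottom_entry = 0;
  }
 public:
  BotLikeTable() : _bottom_entry(0), _committed(false) {}
  void commit() { _committed = true; }

  void zero_bottom_entry() {           // checked variant for normal operation
    assert(_committed);
    zero_bottom_entry_raw();
  }
  void reset() {                       // mirrors reset_bot(): call the raw variant,
    zero_bottom_entry_raw();           // since reset may run before commit() finishes
  }
};

int main() {
  BotLikeTable t;
  t.reset();                           // fine even before the backing memory is committed
  t.commit();
  t.zero_bottom_entry();               // checked path after commit
  return 0;
}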