src/share/vm/gc_implementation/g1/heapRegion.hpp

rev 6589 : 8047818: G1 HeapRegions can no longer be ContiguousSpaces
Reviewed-by:

*** 23,33 ****
   */
  
  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
  
! #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
  #include "gc_implementation/g1/survRateGroup.hpp"
  #include "gc_implementation/shared/ageTable.hpp"
  #include "gc_implementation/shared/spaceDecorator.hpp"
  #include "memory/space.inline.hpp"
--- 23,33 ----
   */
  
  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
  
! #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
  #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
  #include "gc_implementation/g1/survRateGroup.hpp"
  #include "gc_implementation/shared/ageTable.hpp"
  #include "gc_implementation/shared/spaceDecorator.hpp"
  #include "memory/space.inline.hpp"
*** 44,55 ****
  // The problem is that the existence of this method breaks
  // the independence of barrier sets from remembered sets.
  // The solution is to remove this method from the definition
  // of a Space.
  
- class CompactibleSpace;
- class ContiguousSpace;
  class HeapRegionRemSet;
  class HeapRegionRemSetIterator;
  class HeapRegion;
  class HeapRegionSetBase;
  class nmethod;
--- 44,53 ----
  // The problem is that the existence of this method breaks
  // the independence of barrier sets from remembered sets.
  // The solution is to remove this method from the definition
  // of a Space.
  
  class HeapRegionRemSet;
  class HeapRegionRemSetIterator;
  class HeapRegion;
  class HeapRegionSetBase;
  class nmethod;
*** 123,136 ****
  // invalid. Time stamps (on the regions and also on the
  // G1CollectedHeap) are reset at every cleanup (we iterate over
  // the regions anyway) and at the end of a Full GC. The current scheme
  // that uses sequential unsigned ints will fail only if we have 4b
  // evacuation pauses between two cleanups, which is _highly_ unlikely.
! 
! class G1OffsetTableContigSpace: public ContiguousSpace {
    friend class VMStructs;
   protected:
    G1BlockOffsetArrayContigSpace _offsets;
    Mutex _par_alloc_lock;
    volatile unsigned _gc_time_stamp;
    // When we need to retire an allocation region, while other threads
    // are also concurrently trying to allocate into it, we typically
--- 121,136 ----
  // invalid. Time stamps (on the regions and also on the
  // G1CollectedHeap) are reset at every cleanup (we iterate over
  // the regions anyway) and at the end of a Full GC. The current scheme
  // that uses sequential unsigned ints will fail only if we have 4b
  // evacuation pauses between two cleanups, which is _highly_ unlikely.
! class G1OffsetTableContigSpace: public CompactibleSpace {
    friend class VMStructs;
+   HeapWord* _top;
   protected:
+   inline HeapWord* cas_allocate_inner(size_t size);
+   inline HeapWord* allocate_inner(size_t size);
    G1BlockOffsetArrayContigSpace _offsets;
    Mutex _par_alloc_lock;
    volatile unsigned _gc_time_stamp;
    // When we need to retire an allocation region, while other threads
    // are also concurrently trying to allocate into it, we typically
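
[Editor's note, not part of the webrev: the new _top field and the cas_allocate_inner/allocate_inner declarations replace the bump-pointer allocation the class previously inherited from ContiguousSpace. A minimal sketch of how those helpers could look (presumably in heapRegion.inline.hpp, which this page does not show); the bodies, their location, and the use of pointer_delta/Atomic::cmpxchg_ptr are assumptions, not the patch itself:

  // Sketch only -- not the actual change; assumes the usual HotSpot helpers.
  inline HeapWord* G1OffsetTableContigSpace::allocate_inner(size_t size) {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      set_top(obj + size);          // single-threaded bump-pointer allocation
      return obj;
    }
    return NULL;                    // not enough room left in the region
  }

  inline HeapWord* G1OffsetTableContigSpace::cas_allocate_inner(size_t size) {
    do {
      HeapWord* obj = top();
      if (pointer_delta(end(), obj) < size) {
        return NULL;                // region is full (or filled up while retrying)
      }
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, &_top, obj);
      if (result == obj) {
        return obj;                 // we won the race and own [obj, new_top)
      }
      // Another thread advanced _top first; retry against the new value.
    } while (true);
  }
]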
*** 142,151 ****
--- 142,164 ----
  
   public:
    G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                             MemRegion mr);
  
+   inline void set_top(HeapWord* value) { _top = value; }
+   HeapWord* top() const { return _top; }
+ 
+   void reset_after_compaction() { set_top(compaction_top()); }
+ 
+   size_t used() const { return byte_size(bottom(), top()); }
+   size_t free() const { return byte_size(top(), end()); }
+   bool is_free_block(const HeapWord* p) const { return p >= top(); }
+ 
+   MemRegion used_region() const { return MemRegion(bottom(), top()); }
+ 
+   void object_iterate(ObjectClosure* blk);
+   void safe_object_iterate(ObjectClosure* blk);
+ 
    void set_bottom(HeapWord* value);
    void set_end(HeapWord* value);
    virtual HeapWord* saved_mark_word() const;
    void record_top_and_timestamp();
*** 166,175 ****
--- 179,190 ----
    virtual void clear(bool mangle_space);
  
    HeapWord* block_start(const void* p);
    HeapWord* block_start_const(const void* p) const;
  
+   void prepare_for_compaction(CompactPoint* cp);
+ 
    // Add offset table update.
    virtual HeapWord* allocate(size_t word_size);
    HeapWord* par_allocate(size_t word_size);
  
    // MarkSweep support phase3
*** 351,368 ****
      AggregateCountClaimValue = 7,
      VerifyCountClaimValue    = 8,
      ParMarkRootClaimValue    = 9
    };
  
!   inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
!     assert(is_young(), "we can only skip BOT updates on young regions");
!     return ContiguousSpace::par_allocate(word_size);
!   }
!   inline HeapWord* allocate_no_bot_updates(size_t word_size) {
!     assert(is_young(), "we can only skip BOT updates on young regions");
!     return ContiguousSpace::allocate(word_size);
!   }
  
    // If this region is a member of a HeapRegionSeq, the index in that
    // sequence, otherwise -1.
    uint hrs_index() const { return _hrs_index; }
--- 366,380 ----
      AggregateCountClaimValue = 7,
      VerifyCountClaimValue    = 8,
      ParMarkRootClaimValue    = 9
    };
  
!   bool block_is_obj(const HeapWord* p) const;
!   size_t block_size(const HeapWord* p) const;
! 
!   inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
!   inline HeapWord* allocate_no_bot_updates(size_t word_size);
  
    // If this region is a member of a HeapRegionSeq, the index in that
    // sequence, otherwise -1.
    uint hrs_index() const { return _hrs_index; }
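
[Editor's note, not part of the webrev: with the old inline bodies removed from the class, the no-BOT-update allocators would presumably forward to the new inner helpers instead of ContiguousSpace. A hedged sketch; the asserts are carried over from the removed bodies, but the delegation targets and the file the definitions land in are assumptions:

  // Sketch only -- plausible out-of-class definitions, e.g. in heapRegion.inline.hpp.
  inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return cas_allocate_inner(word_size);   // lock-free path for concurrent allocators
  }

  inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return allocate_inner(word_size);       // caller guarantees single-threaded access
  }
]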