
src/share/vm/gc/g1/heapRegion.hpp

rev 10868 : [mq]: 8153170-card-live-data-does-handle-eager-reclaim


 107 // This is only relevant for current GC alloc regions so we keep a time stamp
 108 // per region to determine if the region has been allocated during the current
 109 // GC or not. If the time stamp is current we report a scan_top value which
 110 // was saved at the end of the previous GC for retained alloc regions and which is
 111 // equal to the bottom for all other regions.
 112 // There is a race between card scanners and allocating GC workers: we must ensure
 113 // that card scanners do not read the memory allocated by the GC workers.
 114 // To enforce that, we must not return a value of _top that is more recent than the
 115 // time stamp. This is because a region may become a GC alloc region at
 116 // some point after we've read the time stamp value as being < the current time stamp.
 117 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 118 // The current scheme that uses sequential unsigned ints will fail only after roughly
 119 // 4 billion (2^32) evacuation pauses between two cleanups, which is _highly_ unlikely.
 120 class G1ContiguousSpace: public CompactibleSpace {
 121   friend class VMStructs;
 122   HeapWord* volatile _top;
 123   HeapWord* volatile _scan_top;
 124  protected:
 125   G1BlockOffsetTablePart _bot_part;
 126   Mutex _par_alloc_lock;
 127   volatile unsigned _gc_time_stamp;
 128   // When we need to retire an allocation region, while other threads
 129   // are also concurrently trying to allocate into it, we typically
 130   // allocate a dummy object at the end of the region to ensure that
 131   // no more allocations can take place in it. However, sometimes we
 132   // want to know where the end of the last "real" object we allocated
 133 // into the region was and this is what this field keeps track of.
 134   HeapWord* _pre_dummy_top;
 135 
 136  public:
 137   G1ContiguousSpace(G1BlockOffsetTable* bot);
 138 
 139   void set_top(HeapWord* value) { _top = value; }
 140   HeapWord* top() const { return _top; }
 141 
 142  protected:
 143   // Reset the G1ContiguousSpace.
 144   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 145 
 146   HeapWord* volatile* top_addr() { return &_top; }
 147   // Try to allocate at least min_word_size and up to desired_word_size from this Space.


 157   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 158 
 159  public:
 160   void reset_after_compaction() { set_top(compaction_top()); }
 161 
 162   size_t used() const { return byte_size(bottom(), top()); }
 163   size_t free() const { return byte_size(top(), end()); }
 164   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 165 
 166   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 167 
 168   void object_iterate(ObjectClosure* blk);
 169   void safe_object_iterate(ObjectClosure* blk);
 170 
 171   void mangle_unused_area() PRODUCT_RETURN;
 172   void mangle_unused_area_complete() PRODUCT_RETURN;
 173 
 174   HeapWord* scan_top() const;
 175   void record_timestamp();
 176   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 177   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
 178   void record_retained_region();
 179 
 180   // See the comment above in the declaration of _pre_dummy_top for an
 181   // explanation of what it is.
 182   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 183     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 184     _pre_dummy_top = pre_dummy_top;
 185   }
 186   HeapWord* pre_dummy_top() {
 187     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 188   }
 189   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 190 
 191   virtual void clear(bool mangle_space);
 192 
 193   HeapWord* block_start(const void* p);
 194   HeapWord* block_start_const(const void* p) const;
 195 
 196   // Allocation (return NULL if full).  Assumes the caller has established
 197   // mutually exclusive access to the space.
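The time stamp comment above (source lines 107-119) explains when a card scanner may read all the way up to top(). Below is a minimal sketch of the scan_top() check that description implies, assuming the usual G1CollectedHeap::heap(), get_gc_time_stamp() and OrderAccess::loadload() helpers of this code base; the real definition lives in heapRegion.cpp, and the exact barrier placement and reasoning here are an approximation, not the committed code:

  // Illustrative sketch only, not the actual implementation.
  HeapWord* G1ContiguousSpace::scan_top() const {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    // Read top() before the time stamp. If the time stamp still looks old
    // afterwards, the top() value we read cannot include memory allocated
    // during the current GC (assuming the time stamp is recorded before a
    // region starts serving GC allocations).
    HeapWord* local_top = top();
    OrderAccess::loadload();
    unsigned local_time_stamp = _gc_time_stamp;
    if (local_time_stamp < g1h->get_gc_time_stamp()) {
      // Not a GC alloc region in the current pause: scanning up to top() is safe.
      return local_top;
    } else {
      // Current GC alloc region: only scan up to the value saved at the end of
      // the previous GC (bottom() for regions that were not retained).
      return _scan_top;
    }
  }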




 107 // This is only relevant for current GC alloc regions so we keep a time stamp
 108 // per region to determine if the region has been allocated during the current
 109 // GC or not. If the time stamp is current we report a scan_top value which
 110 // was saved at the end of the previous GC for retained alloc regions and which is
 111 // equal to the bottom for all other regions.
 112 // There is a race between card scanners and allocating GC workers: we must ensure
 113 // that card scanners do not read the memory allocated by the GC workers.
 114 // To enforce that, we must not return a value of _top that is more recent than the
 115 // time stamp. This is because a region may become a GC alloc region at
 116 // some point after we've read the time stamp value as being < the current time stamp.
 117 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 118 // The current scheme that uses sequential unsigned ints will fail only after roughly
 119 // 4 billion (2^32) evacuation pauses between two cleanups, which is _highly_ unlikely.
 120 class G1ContiguousSpace: public CompactibleSpace {
 121   friend class VMStructs;
 122   HeapWord* volatile _top;
 123   HeapWord* volatile _scan_top;
 124  protected:
 125   G1BlockOffsetTablePart _bot_part;
 126   Mutex _par_alloc_lock;
 127   volatile uint _gc_time_stamp;
 128   // When we need to retire an allocation region, while other threads
 129   // are also concurrently trying to allocate into it, we typically
 130   // allocate a dummy object at the end of the region to ensure that
 131   // no more allocations can take place in it. However, sometimes we
 132   // want to know where the end of the last "real" object we allocated
 133 // into the region was and this is what this field keeps track of.
 134   HeapWord* _pre_dummy_top;
 135 
 136  public:
 137   G1ContiguousSpace(G1BlockOffsetTable* bot);
 138 
 139   void set_top(HeapWord* value) { _top = value; }
 140   HeapWord* top() const { return _top; }
 141 
 142  protected:
 143   // Reset the G1ContiguousSpace.
 144   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 145 
 146   HeapWord* volatile* top_addr() { return &_top; }
 147   // Try to allocate at least min_word_size and up to desired_word_size from this Space.


 157   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 158 
 159  public:
 160   void reset_after_compaction() { set_top(compaction_top()); }
 161 
 162   size_t used() const { return byte_size(bottom(), top()); }
 163   size_t free() const { return byte_size(top(), end()); }
 164   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 165 
 166   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 167 
 168   void object_iterate(ObjectClosure* blk);
 169   void safe_object_iterate(ObjectClosure* blk);
 170 
 171   void mangle_unused_area() PRODUCT_RETURN;
 172   void mangle_unused_area_complete() PRODUCT_RETURN;
 173 
 174   HeapWord* scan_top() const;
 175   void record_timestamp();
 176   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 177   uint get_gc_time_stamp() { return _gc_time_stamp; }
 178   void record_retained_region();
 179 
 180   // See the comment above in the declaration of _pre_dummy_top for an
 181   // explanation of what it is.
 182   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 183     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 184     _pre_dummy_top = pre_dummy_top;
 185   }
 186   HeapWord* pre_dummy_top() {
 187     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 188   }
 189   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 190 
 191   virtual void clear(bool mangle_space);
 192 
 193   HeapWord* block_start(const void* p);
 194   HeapWord* block_start_const(const void* p) const;
 195 
 196   // Allocation (return NULL if full).  Assumes the caller has established
 197   // mutually exclusive access to the space.
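The par_allocate_impl declaration at source line 157 promises an allocation of at least min_word_size and at most desired_word_size words. The following is a sketch of the lock-free bump-pointer loop such a method typically uses, written against the top()/top_addr() accessors shown above and HotSpot's Atomic::cmpxchg_ptr, MIN2 and pointer_delta helpers; it is a plausible shape only, not necessarily the body in heapRegion.inline.hpp:

  // Illustrative sketch only.
  inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
                                                        size_t desired_word_size,
                                                        size_t* actual_word_size) {
    do {
      HeapWord* obj = top();
      size_t available = pointer_delta(end(), obj);
      size_t want = MIN2(available, desired_word_size);
      if (want < min_word_size) {
        return NULL;  // Not even the minimum request fits.
      }
      HeapWord* new_top = obj + want;
      // Publish the new top with a CAS; retry if another thread raced us.
      HeapWord* result = (HeapWord*) Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      if (result == obj) {
        *actual_word_size = want;
        return obj;
      }
    } while (true);
  }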

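The _pre_dummy_top comment (source lines 128-134) describes plugging the tail of a region with a dummy object when it is retired while other threads may still be allocating into it. A simplified sketch of that retirement step follows, assuming a par_allocate(size_t) variant on the space plus the CollectedHeap::min_fill_size() and fill_with_object() filler helpers; the function name and loop shape are illustrative, not the exact G1 retirement code:

  // Illustrative sketch only.
  static void fill_up_remaining_space(G1ContiguousSpace* space) {
    size_t free_words = pointer_delta(space->end(), space->top());
    const size_t min_fill = CollectedHeap::min_fill_size();
    while (free_words >= min_fill) {
      // Try to claim everything that is left. A racing allocator may grab
      // part of it first, in which case we recompute and retry.
      HeapWord* dummy = space->par_allocate(free_words);
      if (dummy != NULL) {
        // Format the claimed space as a filler object so that no further
        // allocation can succeed, and remember where the last real object ended.
        CollectedHeap::fill_with_object(dummy, free_words);
        space->set_pre_dummy_top(dummy);
        return;
      }
      free_words = pointer_delta(space->end(), space->top());
    }
  }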
