
src/share/vm/gc/g1/heapRegion.hpp

rev 8868 : imported patch 8067336-allow-that-plab-allocations-at-end-of-regions-are-flexible
rev 8869 : [mq]: refactor-desired-actual-size
rev 8870 : [mq]: tom-review


  92 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
  93 // be reconciled, then G1OffsetTableContigSpace could go away.
  94 
  95 // The idea behind time stamps is the following. We want to keep track of
  96 // the highest address where it's safe to scan objects for each region.
  97 // This is only relevant for current GC alloc regions so we keep a time stamp
  98 // per region to determine if the region has been allocated during the current
  99 // GC or not. If the time stamp is current we report a scan_top value which
 100 // was saved at the end of the previous GC for retained alloc regions and which is
 101 // equal to the bottom for all other regions.
 102 // There is a race between card scanners and allocating gc workers where we must ensure
 103 // that card scanners do not read the memory allocated by the gc workers.
 104 // In order to enforce that, we must not return a value of _top which is more recent than the
 105 // time stamp. This is due to the fact that a region may become a gc alloc region at
 106 // some point after we've read the timestamp value as being < the current time stamp.
 107 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 108 // The current scheme that uses sequential unsigned ints will fail only if we have 4 billion
 109 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 110 class G1OffsetTableContigSpace: public CompactibleSpace {
 111   friend class VMStructs;
 112   HeapWord* _top;
 113   HeapWord* volatile _scan_top;
 114  protected:
 115   G1BlockOffsetArrayContigSpace _offsets;
 116   Mutex _par_alloc_lock;
 117   volatile unsigned _gc_time_stamp;
 118   // When we need to retire an allocation region, while other threads
 119   // are also concurrently trying to allocate into it, we typically
 120   // allocate a dummy object at the end of the region to ensure that
 121   // no more allocations can take place in it. However, sometimes we
 122   // want to know where the end of the last "real" object we allocated
 123   // into the region was and this is what this keeps track of.
 124   HeapWord* _pre_dummy_top;
 125 
 126  public:
 127   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 128                            MemRegion mr);
 129 
 130   void set_top(HeapWord* value) { _top = value; }
 131   HeapWord* top() const { return _top; }
 132 
 133  protected:
 134   // Reset the G1OffsetTableContigSpace.
 135   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 136 
 137   HeapWord** top_addr() { return &_top; }
 138   // Try to allocate at least min_word_size and up to desired_word_size from this Space.
 139   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
 140   // space allocated.
 141   // This version assumes that all allocation requests to this Space are properly
 142   // synchronized.
 143   inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 144   // Try to allocate at least min_word_size and up to desired_word_size from this Space.
 145   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
 146   // space allocated.
 147   // This version synchronizes with other calls to par_allocate_impl().
 148   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 149 
 150  public:
 151   void reset_after_compaction() { set_top(compaction_top()); }
 152 
 153   size_t used() const { return byte_size(bottom(), top()); }
 154   size_t free() const { return byte_size(top(), end()); }
 155   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 156 
 157   MemRegion used_region() const { return MemRegion(bottom(), top()); }




  92 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
  93 // be reconciled, then G1OffsetTableContigSpace could go away.
  94 
  95 // The idea behind time stamps is the following. We want to keep track of
  96 // the highest address where it's safe to scan objects for each region.
  97 // This is only relevant for current GC alloc regions so we keep a time stamp
  98 // per region to determine if the region has been allocated during the current
  99 // GC or not. If the time stamp is current we report a scan_top value which
 100 // was saved at the end of the previous GC for retained alloc regions and which is
 101 // equal to the bottom for all other regions.
 102 // There is a race between card scanners and allocating gc workers where we must ensure
 103 // that card scanners do not read the memory allocated by the gc workers.
 104 // In order to enforce that, we must not return a value of _top which is more recent than the
 105 // time stamp. This is due to the fact that a region may become a gc alloc region at
 106 // some point after we've read the timestamp value as being < the current time stamp.
 107 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 108 // The current scheme that uses sequential unsigned ints will fail only if we have 4 billion
 109 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 110 class G1OffsetTableContigSpace: public CompactibleSpace {
 111   friend class VMStructs;
 112   HeapWord* volatile _top;
 113   HeapWord* volatile _scan_top;
 114  protected:
 115   G1BlockOffsetArrayContigSpace _offsets;
 116   Mutex _par_alloc_lock;
 117   volatile unsigned _gc_time_stamp;
 118   // When we need to retire an allocation region, while other threads
 119   // are also concurrently trying to allocate into it, we typically
 120   // allocate a dummy object at the end of the region to ensure that
 121   // no more allocations can take place in it. However, sometimes we
 122   // want to know where the end of the last "real" object we allocated
 123   // into the region was and this is what this keeps track of.
 124   HeapWord* _pre_dummy_top;
 125 
 126  public:
 127   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 128                            MemRegion mr);
 129 
 130   void set_top(HeapWord* value) { _top = value; }
 131   HeapWord* top() const { return _top; }
 132 
 133  protected:
 134   // Reset the G1OffsetTableContigSpace.
 135   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 136 
 137   HeapWord* volatile* top_addr() { return &_top; }
 138   // Try to allocate at least min_word_size and up to desired_word_size from this Space.
 139   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
 140   // space allocated.
 141   // This version assumes that all allocation requests to this Space are properly
 142   // synchronized.
 143   inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 144   // Try to allocate at least min_word_size and up to desired_word_size from this Space.
 145   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
 146   // space allocated.
 147   // This version synchronizes with other calls to par_allocate_impl().
 148   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 149 
 150  public:
 151   void reset_after_compaction() { set_top(compaction_top()); }
 152 
 153   size_t used() const { return byte_size(bottom(), top()); }
 154   size_t free() const { return byte_size(top(), end()); }
 155   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 156 
 157   MemRegion used_region() const { return MemRegion(bottom(), top()); }
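
To illustrate the time-stamp protocol described in the comment at lines 95-109, here is a minimal sketch of how a reader such as scan_top() could decide what to return. It assumes a global G1CollectedHeap::get_gc_time_stamp() accessor and HotSpot's OrderAccess barriers; it is meant to show the required load ordering, not necessarily the exact implementation.

inline HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload(); // read _top before _gc_time_stamp
  unsigned local_time_stamp = _gc_time_stamp;
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    // The region has not become a GC alloc region in the current GC, so
    // everything below the _top value we read was allocated before this
    // GC and is safe to scan.
    return local_top;
  } else {
    // The region is (or was) a GC alloc region in this GC; only the value
    // saved in _scan_top may be trusted, never the possibly newer _top.
    return _scan_top;
  }
}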

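The two allocation entry points at lines 143 and 148 differ only in how they synchronize. As a sketch of what the parallel variant could look like, assuming HotSpot's Atomic::cmpxchg_ptr, pointer_delta and MIN2 helpers (illustrative only, not necessarily the actual body):

inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t min_word_size,
                                                             size_t desired_word_size,
                                                             size_t* actual_word_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    // Hand out desired_word_size if it fits, otherwise whatever is left,
    // provided that still satisfies the minimum.
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate < min_word_size) {
      return NULL; // the caller must retire this region and pick another one
    }
    HeapWord* new_top = obj + want_to_allocate;
    // Race with other allocating threads on _top via CAS; retry on failure.
    HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, &_top, obj);
    if (result == obj) {
      *actual_word_size = want_to_allocate;
      return obj;
    }
  } while (true);
}

The serial allocate_impl() would be the same bump-the-pointer logic without the CAS, since its callers guarantee exclusive access.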

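Similarly, the _pre_dummy_top bookkeeping described at lines 118-124 only matters while a region is being retired. A rough sketch of that sequence, assuming a public par_allocate(min, desired, actual) wrapper, a set_pre_dummy_top() setter and CollectedHeap's filler helpers (all outside the excerpt shown above):

static void fill_up_remaining_space(G1OffsetTableContigSpace* space) {
  size_t min_fill = CollectedHeap::min_fill_size();
  for (;;) {
    size_t free_words = pointer_delta(space->end(), space->top());
    if (free_words < min_fill) {
      return; // too little left to format as an object
    }
    size_t actual = 0;
    // Try to claim everything that is currently free in one request.
    HeapWord* dummy = space->par_allocate(free_words, free_words, &actual);
    if (dummy != NULL) {
      // Remember where the last "real" object ended, then plug the tail
      // with a filler object so no further allocations can succeed.
      space->set_pre_dummy_top(dummy);
      CollectedHeap::fill_with_object(dummy, free_words);
      return;
    }
    // Another thread allocated concurrently; re-read the free size and retry.
  }
}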