
src/share/vm/gc/g1/heapRegion.hpp

rev 13047 : imported patch 8071280-specialize-heapregion-oops-on-card-seq-iterate
rev 13048 : imported patch 8071280-kim-review
rev 13049 : imported patch 8071280-kim-sangheon-review
rev 13050 : imported patch 8071280-erikh-review
rev 13051 : imported patch 8162928-micro-optimizations-in-remembered-set-scan
rev 13056 : imported patch 8177044-remove-scan-top


  79 // be reconciled, then G1OffsetTableContigSpace could go away.
  80 
  81 // The idea behind time stamps is the following. We want to keep track of
  82 // the highest address up to which it is safe to scan objects in each region.
  83 // This is only relevant for current GC alloc regions, so we keep a time stamp
  84 // per region to determine whether the region has been allocated during the
  85 // current GC or not. If the time stamp is current, we report a scan_top value
  86 // which was saved at the end of the previous GC for retained alloc regions,
  87 // and which is equal to bottom for all other regions.
  88 // There is a race between card scanners and allocating GC workers: we must
  89 // ensure that card scanners do not read memory allocated by the GC workers.
  90 // To enforce that, we must not return a value of _top which is more recent
  91 // than the time stamp, because a region may become a GC alloc region at some
  92 // point after we have read its time stamp as being less than the current one.
  93 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
  94 // The current scheme, which uses sequential unsigned ints, will fail only if
  95 // there are four billion evacuation pauses between two cleanups, which is _highly_ unlikely.
  96 class G1ContiguousSpace: public CompactibleSpace {
  97   friend class VMStructs;
  98   HeapWord* volatile _top;
  99   HeapWord* volatile _scan_top;
 100  protected:
 101   G1BlockOffsetTablePart _bot_part;
 102   Mutex _par_alloc_lock;
 103   volatile uint _gc_time_stamp;
 104   // When we need to retire an allocation region, while other threads
 105   // are also concurrently trying to allocate into it, we typically
 106   // allocate a dummy object at the end of the region to ensure that
 107   // no more allocations can take place in it. However, sometimes we
 108   // want to know where the end of the last "real" object we allocated
 109   // into the region was, and that is what this field keeps track of.
 110   HeapWord* _pre_dummy_top;
 111 
 112  public:
 113   G1ContiguousSpace(G1BlockOffsetTable* bot);
 114 
 115   void set_top(HeapWord* value) { _top = value; }
 116   HeapWord* top() const { return _top; }
 117 
 118  protected:
 119   // Reset the G1ContiguousSpace.


 130   // Returns NULL if not possible, otherwise sets actual_word_size to the number
 131   // of words actually allocated.
 132   // This version synchronizes with other calls to par_allocate_impl().
 133   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 134 
 135  public:
 136   void reset_after_compaction() { set_top(compaction_top()); }
 137 
 138   size_t used() const { return byte_size(bottom(), top()); }
 139   size_t free() const { return byte_size(top(), end()); }
 140   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 141 
 142   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 143 
 144   void object_iterate(ObjectClosure* blk);
 145   void safe_object_iterate(ObjectClosure* blk);
 146 
 147   void mangle_unused_area() PRODUCT_RETURN;
 148   void mangle_unused_area_complete() PRODUCT_RETURN;
 149 
 150   HeapWord* scan_top() const;
 151   void record_timestamp();
 152   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 153   uint get_gc_time_stamp() { return _gc_time_stamp; }
 154   void record_retained_region();
 155 
 156   // See the comment above in the declaration of _pre_dummy_top for an
 157   // explanation of what it is.
 158   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 159     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 160     _pre_dummy_top = pre_dummy_top;
 161   }
 162   HeapWord* pre_dummy_top() {
 163     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 164   }
 165   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 166 
 167   virtual void clear(bool mangle_space);
 168 
 169   HeapWord* block_start(const void* p);
 170   HeapWord* block_start_const(const void* p) const;
 171 
 172   // Allocation (returns NULL if full).  Assumes the caller has established
 173   // mutually exclusive access to the space.
 174   HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
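
To make the reader side of the race described in the comment above concrete, here is a minimal sketch of how scan_top() can consult the time stamp. It is an illustration rather than the exact HotSpot code: the g1h->get_gc_time_stamp() global-stamp accessor and the OrderAccess::loadload() placement are assumptions derived from the rule that the returned _top must never be more recent than the time stamp that was read.

  // Sketch only: the loadload barrier orders the load of _top before the
  // load of the time stamp, so a current time stamp can never be paired
  // with a _top value that was written after the stamp.
  inline HeapWord* G1ContiguousSpace::scan_top() const {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    HeapWord* local_top = top();          // read _top first
    OrderAccess::loadload();
    uint local_time_stamp = _gc_time_stamp;
    if (local_time_stamp < g1h->get_gc_time_stamp()) {
      // The region has not been a GC alloc region in the current GC:
      // everything below top() predates this GC and is safe to scan.
      return local_top;
    } else {
      // Current GC alloc region: scan only up to the value saved when
      // the time stamp was recorded.
      return _scan_top;
    }
  }

A card scanner would then clip its scan limit to MIN2(card_end, scan_top()) so that it never walks into memory the GC workers are still bump-allocating.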


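The writer side has to publish in the opposite order: the saved scan limit must become visible before the new time stamp does. A hedged sketch of record_timestamp(), again assuming the same global stamp accessor; the storestore barrier is the detail being illustrated and may be unnecessary if this only ever runs at a safepoint.

  // Sketch only: store _scan_top before _gc_time_stamp so a scanner
  // that observes the current stamp also observes the saved scan limit.
  void G1ContiguousSpace::record_timestamp() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    uint curr_gc_time_stamp = g1h->get_gc_time_stamp();
    if (_gc_time_stamp < curr_gc_time_stamp) {
      _scan_top = top();                  // equals bottom for a fresh alloc region
      OrderAccess::storestore();
      _gc_time_stamp = curr_gc_time_stamp;
    }
  }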

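The _pre_dummy_top bookkeeping is easiest to see at region retirement. The following sketch is hypothetical (retire_region() is not a real HotSpot function, and it uses the serial allocate() for brevity where the concurrent retirement described in the comment would need the parallel variant); it only shows how the dummy object and _pre_dummy_top relate.

  // Sketch only: plug the remaining space with a dummy object so that
  // further allocation attempts fail, while _pre_dummy_top remembers
  // where the last "real" object ended. Ignores the minimum filler
  // object size for brevity.
  static void retire_region(G1ContiguousSpace* space) {
    size_t remaining = pointer_delta(space->end(), space->top());
    if (remaining > 0) {
      size_t actual = 0;
      HeapWord* dummy = space->allocate(remaining, remaining, &actual);
      if (dummy != NULL) {
        space->set_pre_dummy_top(dummy);  // dummy starts where real data ends
        // Format the plug as a filler object so the heap stays parseable.
        CollectedHeap::fill_with_object(dummy, actual);
      }
    }
  }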

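Finally, the synchronization that par_allocate_impl() advertises is a CAS loop on _top. A minimal sketch of the min/desired/actual protocol follows; Atomic::cmpxchg_ptr matches the HotSpot style of this vintage, but treat the exact call and cast as assumptions.

  // Sketch only: lock-free bump-pointer allocation. The CAS on _top is
  // the synchronization point between concurrently allocating workers.
  inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
                                                        size_t desired_word_size,
                                                        size_t* actual_word_size) {
    do {
      HeapWord* obj = top();
      size_t available = pointer_delta(end(), obj);
      size_t to_allocate = MIN2(available, desired_word_size);
      if (to_allocate < min_word_size) {
        return NULL;                      // not even the minimum fits
      }
      HeapWord* new_top = obj + to_allocate;
      HeapWord* result = (HeapWord*) Atomic::cmpxchg_ptr(new_top, &_top, obj);
      if (result == obj) {
        *actual_word_size = to_allocate;  // may be less than desired
        return obj;
      }
      // CAS lost: another worker advanced _top; retry from the new top().
    } while (true);
  }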
  79 // be reconciled, then G1OffsetTableContigSpace could go away.
  80 
  81 // The idea behind time stamps is the following. We want to keep track of
  82 // the highest address up to which it is safe to scan objects in each region.
  83 // This is only relevant for current GC alloc regions, so we keep a time stamp
  84 // per region to determine whether the region has been allocated during the
  85 // current GC or not. If the time stamp is current, we report a scan_top value
  86 // which was saved at the end of the previous GC for retained alloc regions,
  87 // and which is equal to bottom for all other regions.
  88 // There is a race between card scanners and allocating GC workers: we must
  89 // ensure that card scanners do not read memory allocated by the GC workers.
  90 // To enforce that, we must not return a value of _top which is more recent
  91 // than the time stamp, because a region may become a GC alloc region at some
  92 // point after we have read its time stamp as being less than the current one.
  93 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
  94 // The current scheme, which uses sequential unsigned ints, will fail only if
  95 // there are four billion evacuation pauses between two cleanups, which is _highly_ unlikely.
  96 class G1ContiguousSpace: public CompactibleSpace {
  97   friend class VMStructs;
  98   HeapWord* volatile _top;

  99  protected:
 100   G1BlockOffsetTablePart _bot_part;
 101   Mutex _par_alloc_lock;
 102   volatile uint _gc_time_stamp;
 103   // When we need to retire an allocation region, while other threads
 104   // are also concurrently trying to allocate into it, we typically
 105   // allocate a dummy object at the end of the region to ensure that
 106   // no more allocations can take place in it. However, sometimes we
 107   // want to know where the end of the last "real" object we allocated
 108   // into the region was, and that is what this field keeps track of.
 109   HeapWord* _pre_dummy_top;
 110 
 111  public:
 112   G1ContiguousSpace(G1BlockOffsetTable* bot);
 113 
 114   void set_top(HeapWord* value) { _top = value; }
 115   HeapWord* top() const { return _top; }
 116 
 117  protected:
 118   // Reset the G1ContiguousSpace.


 129   // Returns NULL if not possible, otherwise sets actual_word_size to the number
 130   // of words actually allocated.
 131   // This version synchronizes with other calls to par_allocate_impl().
 132   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 133 
 134  public:
 135   void reset_after_compaction() { set_top(compaction_top()); }
 136 
 137   size_t used() const { return byte_size(bottom(), top()); }
 138   size_t free() const { return byte_size(top(), end()); }
 139   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 140 
 141   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 142 
 143   void object_iterate(ObjectClosure* blk);
 144   void safe_object_iterate(ObjectClosure* blk);
 145 
 146   void mangle_unused_area() PRODUCT_RETURN;
 147   void mangle_unused_area_complete() PRODUCT_RETURN;
 148 

 149   void record_timestamp();
 150   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 151   uint get_gc_time_stamp() { return _gc_time_stamp; }

 152 
 153   // See the comment above in the declaration of _pre_dummy_top for an
 154   // explanation of what it is.
 155   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 156     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 157     _pre_dummy_top = pre_dummy_top;
 158   }
 159   HeapWord* pre_dummy_top() {
 160     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 161   }
 162   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 163 
 164   virtual void clear(bool mangle_space);
 165 
 166   HeapWord* block_start(const void* p);
 167   HeapWord* block_start_const(const void* p) const;
 168 
 169   // Allocation (returns NULL if full).  Assumes the caller has established
 170   // mutually exclusive access to the space.
 171   HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);

