
src/share/vm/gc/g1/heapRegion.hpp

  98 // significantly, and we need functionality that is only in the G1 version.
  99 // So I copied that code, which led to an alternate G1 version of
 100 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
 101 // be reconciled, then G1OffsetTableContigSpace could go away.
 102 
 103 // The idea behind time stamps is the following. We want to keep track of
 104 // the highest address where it's safe to scan objects for each region.
 105 // This is only relevant for current GC alloc regions so we keep a time stamp
 106 // per region to determine if the region has been allocated during the current
 107 // GC or not. If the time stamp is current we report a scan_top value which
 108 // was saved at the end of the previous GC for retained alloc regions and which is
 109 // equal to the bottom for all other regions.
 110 // There is a race between card scanners and allocating gc workers where we must ensure
 111 // that card scanners do not read the memory allocated by the gc workers.
 112 // In order to enforce that, we must not return a value of _top which is more recent than the
 113 // time stamp. This is due to the fact that a region may become a gc alloc region at
 114 // some point after we've read the timestamp value as being < the current time stamp.
 115 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 116 // The current scheme that uses sequential unsigned ints will fail only if we have 4b
 117 // evacuation pauses between two cleanups, which is _highly_ unlikely.
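
A minimal sketch of the rule described above, assuming the current, global GC time stamp is available via G1CollectedHeap::heap()->get_gc_time_stamp() and that OrderAccess::loadload() provides the required read ordering; this is illustrative only, not the file's actual scan_top() implementation:

    HeapWord* G1OffsetTableContigSpace::scan_top() const {
      // Read top() first; if the region's time stamp still turns out to be
      // stale afterwards, this top() value predates any use of the region as
      // a GC alloc region in the current GC and is therefore safe to scan to.
      HeapWord* local_top = top();
      OrderAccess::loadload();
      if (_gc_time_stamp < G1CollectedHeap::heap()->get_gc_time_stamp()) {
        return local_top;
      }
      // Current (or retained) GC alloc region: only the value saved at the
      // end of the previous GC is known to be safe.
      return _scan_top;
    }
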
 118 class G1OffsetTableContigSpace: public CompactibleSpace {
 119   friend class VMStructs;
 120   HeapWord* volatile _top;
 121   HeapWord* volatile _scan_top;
 122  protected:
 123   G1BlockOffsetArrayContigSpace _offsets;
 124   Mutex _par_alloc_lock;
 125   volatile unsigned _gc_time_stamp;
 126   // When we need to retire an allocation region, while other threads
 127   // are also concurrently trying to allocate into it, we typically
 128   // allocate a dummy object at the end of the region to ensure that
 129   // no more allocations can take place in it. However, sometimes we
 130   // want to know where the end of the last "real" object we allocated
 131   // into the region was, and this is what this field keeps track of.
 132   HeapWord* _pre_dummy_top;
 133 
 134  public:
 135   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 136                            MemRegion mr);
 137 
 138   void set_top(HeapWord* value) { _top = value; }
 139   HeapWord* top() const { return _top; }
 140 
 141  protected:
 142   // Reset the G1OffsetTableContigSpace.
 143   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 144 
 145   HeapWord* volatile* top_addr() { return &_top; }
 146   // Try to allocate at least min_word_size and up to desired_word_size words from this Space.
 147   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
 148   // space allocated.
 149   // This version assumes that all allocation requests to this Space are properly
 150   // synchronized.
 151   inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 152   // Try to allocate at least min_word_size and up to desired_word_size words from this Space.
 153   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
 154   // space allocated.
 155   // This version synchronizes with other calls to par_allocate_impl().
 156   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 157 
 158  public:
 159   void reset_after_compaction() { set_top(compaction_top()); }
 160 
 161   size_t used() const { return byte_size(bottom(), top()); }
 162   size_t free() const { return byte_size(top(), end()); }
 163   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 164 
 165   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 166 
 167   void object_iterate(ObjectClosure* blk);
 168   void safe_object_iterate(ObjectClosure* blk);
 169 
 170   void set_bottom(HeapWord* value);
 171   void set_end(HeapWord* value);
 172 
 173   void mangle_unused_area() PRODUCT_RETURN;
 174   void mangle_unused_area_complete() PRODUCT_RETURN;
 175 
 176   HeapWord* scan_top() const;
 177   void record_timestamp();
 178   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 179   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
 180   void record_retained_region();
 181 
 182   // See the comment above in the declaration of _pre_dummy_top for an
 183   // explanation of what it is.
 184   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 185     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 186     _pre_dummy_top = pre_dummy_top;
 187   }
 188   HeapWord* pre_dummy_top() {
 189     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 190   }
 191   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 192 


 196   HeapWord* block_start_const(const void* p) const;
 197 
 198   // Allocation (return NULL if full).  Assumes the caller has established
 199   // mutually exclusive access to the space.
 200   HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 201   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
 202   HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 203 
 204   virtual HeapWord* allocate(size_t word_size);
 205   virtual HeapWord* par_allocate(size_t word_size);
 206 
 207   HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
 208 
 209   // MarkSweep support phase3
 210   virtual HeapWord* initialize_threshold();
 211   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 212 
 213   virtual void print() const;
 214 
 215   void reset_bot() {
 216     _offsets.reset_bot();
 217   }
 218 
 219   void print_bot_on(outputStream* out) {
 220     _offsets.print_on(out);
 221   }
 222 };
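
The allocation contract documented in the class above (allocate at least min_word_size, at most desired_word_size, and report the granted size through actual_word_size) can be sketched roughly as follows for the serial case; this is a simplified illustration, not necessarily the real inline definition:

    inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t min_word_size,
                                                             size_t desired_word_size,
                                                             size_t* actual_word_size) {
      HeapWord* obj = top();
      size_t available = pointer_delta(end(), obj);      // words left in this space
      size_t to_allocate = MIN2(available, desired_word_size);
      if (to_allocate < min_word_size) {
        return NULL;                                      // cannot satisfy the minimum
      }
      set_top(obj + to_allocate);                         // callers are already synchronized
      *actual_word_size = to_allocate;
      return obj;
    }
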
 223 
 224 class HeapRegion: public G1OffsetTableContigSpace {
 225   friend class VMStructs;
 226   // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
 227   template <typename SpaceType>
 228   friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
 229  private:
 230 
 231   // The remembered set for this region.
 232   // (Might want to make this "inline" later, to avoid some alloc failure
 233   // issues.)
 234   HeapRegionRemSet* _rem_set;
 235 
 236   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 237 
 238   // Auxiliary functions for scan_and_forward support.
 239   // See comments for CompactibleSpace for more information.
 240   inline HeapWord* scan_limit() const {
 241     return top();
 242   }
 243 
 244   inline bool scanned_block_is_obj(const HeapWord* addr) const {
 245     return true; // Always true, since scan_limit is top
 246   }
 247 
 248   inline size_t scanned_block_size(const HeapWord* addr) const {
 249     return HeapRegion::block_size(addr); // Avoid virtual call
 250   }
 251 
 252  protected:
 253   // The index of this region in the heap region sequence.
 254   uint  _hrm_index;
 255 
 256   AllocationContext_t _allocation_context;
 257 


 313     _prev_top_at_mark_start = bot;
 314     _next_top_at_mark_start = bot;
 315   }
 316 
 317   // Cached attributes used in the collection set policy information
 318 
 319   // The RSet length that was added to the total value
 320   // for the collection set.
 321   size_t _recorded_rs_length;
 322 
 323   // The predicted elapsed time that was added to total value
 324   // for the collection set.
 325   double _predicted_elapsed_time_ms;
 326 
 327   // The predicted number of bytes to copy that was added to
 328   // the total value for the collection set.
 329   size_t _predicted_bytes_to_copy;
 330 
 331  public:
 332   HeapRegion(uint hrm_index,
 333              G1BlockOffsetSharedArray* sharedOffsetArray,
 334              MemRegion mr);
 335 
 336   // Initializing the HeapRegion not only resets the data structure, but also
 337   // resets the BOT for that heap region.
 338   // The default value for clear_space means that we will do the clearing ourselves
 339   // if there is clearing to be done. We also always mangle the space.
 340   virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
 341 
 342   static int    LogOfHRGrainBytes;
 343   static int    LogOfHRGrainWords;
 344 
 345   static size_t GrainBytes;
 346   static size_t GrainWords;
 347   static size_t CardsPerRegion;
 348 
 349   static size_t align_up_to_region_byte_size(size_t sz) {
 350     return (sz + (size_t) GrainBytes - 1) &
 351                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
 352   }
 353 




  98 // significantly, and we need functionality that is only in the G1 version.
  99 // So I copied that code, which led to an alternate G1 version of
 100 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
 101 // be reconciled, then G1OffsetTableContigSpace could go away.
 102 
 103 // The idea behind time stamps is the following. We want to keep track of
 104 // the highest address where it's safe to scan objects for each region.
 105 // This is only relevant for current GC alloc regions so we keep a time stamp
 106 // per region to determine if the region has been allocated during the current
 107 // GC or not. If the time stamp is current we report a scan_top value which
 108 // was saved at the end of the previous GC for retained alloc regions and which is
 109 // equal to the bottom for all other regions.
 110 // There is a race between card scanners and allocating gc workers where we must ensure
 111 // that card scanners do not read the memory allocated by the gc workers.
 112 // In order to enforce that, we must not return a value of _top which is more recent than the
 113 // time stamp. This is due to the fact that a region may become a gc alloc region at
 114 // some point after we've read the timestamp value as being < the current time stamp.
 115 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 116 // The current scheme that uses sequential unsigned ints will fail only if we have 4b
 117 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 118 class G1ContiguousSpace: public CompactibleSpace {
 119   friend class VMStructs;
 120   HeapWord* volatile _top;
 121   HeapWord* volatile _scan_top;
 122  protected:
 123   G1BlockOffsetTablePart _bot_part;
 124   Mutex _par_alloc_lock;
 125   volatile unsigned _gc_time_stamp;
 126   // When we need to retire an allocation region, while other threads
 127   // are also concurrently trying to allocate into it, we typically
 128   // allocate a dummy object at the end of the region to ensure that
 129   // no more allocations can take place in it. However, sometimes we
 130   // want to know where the end of the last "real" object we allocated
 131   // into the region was, and this is what this field keeps track of.
 132   HeapWord* _pre_dummy_top;
 133 
 134  public:
 135   G1ContiguousSpace(G1BlockOffsetTable* bot);

 136 
 137   void set_top(HeapWord* value) { _top = value; }
 138   HeapWord* top() const { return _top; }
 139 
 140  protected:
 141   // Reset the G1ContiguousSpace.
 142   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 143 
 144   HeapWord* volatile* top_addr() { return &_top; }
 145   // Try to allocate at least min_word_size and up to desired_word_size words from this Space.
 146   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
 147   // space allocated.
 148   // This version assumes that all allocation requests to this Space are properly
 149   // synchronized.
 150   inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 151   // Try to allocate at least min_word_size and up to desired_word_size words from this Space.
 152   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
 153   // space allocated.
 154   // This version synchronizes with other calls to par_allocate_impl().
 155   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 156 
 157  public:
 158   void reset_after_compaction() { set_top(compaction_top()); }
 159 
 160   size_t used() const { return byte_size(bottom(), top()); }
 161   size_t free() const { return byte_size(top(), end()); }
 162   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 163 
 164   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 165 
 166   void object_iterate(ObjectClosure* blk);
 167   void safe_object_iterate(ObjectClosure* blk);
 168 



 169   void mangle_unused_area() PRODUCT_RETURN;
 170   void mangle_unused_area_complete() PRODUCT_RETURN;
 171 
 172   HeapWord* scan_top() const;
 173   void record_timestamp();
 174   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 175   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
 176   void record_retained_region();
 177 
 178   // See the comment above in the declaration of _pre_dummy_top for an
 179   // explanation of what it is.
 180   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 181     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 182     _pre_dummy_top = pre_dummy_top;
 183   }
 184   HeapWord* pre_dummy_top() {
 185     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 186   }
 187   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 188 


 192   HeapWord* block_start_const(const void* p) const;
 193 
 194   // Allocation (return NULL if full).  Assumes the caller has established
 195   // mutually exclusive access to the space.
 196   HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 197   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
 198   HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 199 
 200   virtual HeapWord* allocate(size_t word_size);
 201   virtual HeapWord* par_allocate(size_t word_size);
 202 
 203   HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
 204 
 205   // MarkSweep support phase3
 206   virtual HeapWord* initialize_threshold();
 207   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 208 
 209   virtual void print() const;
 210 
 211   void reset_bot() {
 212     _bot_part.reset_bot();
 213   }
 214 
 215   void print_bot_on(outputStream* out) {
 216     _bot_part.print_on(out);
 217   }
 218 };
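
To make the _pre_dummy_top bookkeeping above concrete: when a region is retired, the retiring path allocates the dummy object over the remaining tail and then records its start, i.e. the end of the last real allocation, with set_pre_dummy_top(). A caller that only cares about real data can then be sketched as below; real_data_words() is a hypothetical helper, not part of this file:

    static size_t real_data_words(G1ContiguousSpace* space) {
      // pre_dummy_top() ignores any trailing dummy object and falls back to
      // top() when no dummy was ever allocated into the region.
      return pointer_delta(space->pre_dummy_top(), space->bottom());
    }
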
 219 
 220 class HeapRegion: public G1ContiguousSpace {
 221   friend class VMStructs;
 222   // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
 223   template <typename SpaceType>
 224   friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
 225  private:
 226 
 227   // The remembered set for this region.
 228   // (Might want to make this "inline" later, to avoid some alloc failure
 229   // issues.)
 230   HeapRegionRemSet* _rem_set;
 231 


 232   // Auxiliary functions for scan_and_forward support.
 233   // See comments for CompactibleSpace for more information.
 234   inline HeapWord* scan_limit() const {
 235     return top();
 236   }
 237 
 238   inline bool scanned_block_is_obj(const HeapWord* addr) const {
 239     return true; // Always true, since scan_limit is top
 240   }
 241 
 242   inline size_t scanned_block_size(const HeapWord* addr) const {
 243     return HeapRegion::block_size(addr); // Avoid virtual call
 244   }
 245 
 246  protected:
 247   // The index of this region in the heap region sequence.
 248   uint  _hrm_index;
 249 
 250   AllocationContext_t _allocation_context;
 251 


 307     _prev_top_at_mark_start = bot;
 308     _next_top_at_mark_start = bot;
 309   }
 310 
 311   // Cached attributes used in the collection set policy information
 312 
 313   // The RSet length that was added to the total value
 314   // for the collection set.
 315   size_t _recorded_rs_length;
 316 
 317   // The predicted elapsed time that was added to total value
 318   // for the collection set.
 319   double _predicted_elapsed_time_ms;
 320 
 321   // The predicted number of bytes to copy that was added to
 322   // the total value for the collection set.
 323   size_t _predicted_bytes_to_copy;
 324 
 325  public:
 326   HeapRegion(uint hrm_index,
 327              G1BlockOffsetTable* bot,
 328              MemRegion mr);
 329 
 330   // Initializing the HeapRegion not only resets the data structure, but also
 331   // resets the BOT for that heap region.
 332   // The default value for clear_space means that we will do the clearing ourselves
 333   // if there is clearing to be done. We also always mangle the space.
 334   virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
 335 
 336   static int    LogOfHRGrainBytes;
 337   static int    LogOfHRGrainWords;
 338 
 339   static size_t GrainBytes;
 340   static size_t GrainWords;
 341   static size_t CardsPerRegion;
 342 
 343   static size_t align_up_to_region_byte_size(size_t sz) {
 344     return (sz + (size_t) GrainBytes - 1) &
 345                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
 346   }
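
A worked example of the rounding above, assuming a 1 MB region size (GrainBytes == 1048576, LogOfHRGrainBytes == 20):

    //   align_up_to_region_byte_size(1500000)
    //     = (1500000 + 1048576 - 1) & ~(1048576 - 1)
    //     = 2548575 with the low 20 bits cleared
    //     = 2097152, i.e. 2 MB, the next whole-region boundary.
    //   A size that is already a multiple of GrainBytes is returned unchanged.
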
 347 

