
src/share/vm/gc/g1/heapRegion.hpp

rev 8868 : imported patch 8067336-allow-that-plab-allocations-at-end-of-regions-are-flexible
rev 8869 : [mq]: refactor-desired-actual-size
rev 8870 : [mq]: tom-review


  92 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
  93 // be reconciled, then G1OffsetTableContigSpace could go away.
  94 
  95 // The idea behind time stamps is the following. We want to keep track,
  96 // for each region, of the highest address up to which it is safe to scan objects.
  97 // This is only relevant for current GC alloc regions, so we keep a time stamp
  98 // per region to determine whether the region has been allocated into during
  99 // the current GC. If the time stamp is current, we report a scan_top value which
 100 // was saved at the end of the previous GC for retained alloc regions and which is
 101 // equal to the bottom for all other regions.
 102 // There is a race between card scanners and allocating GC workers: we must ensure
 103 // that card scanners do not read memory allocated by the GC workers.
 104 // To enforce that, we must not return a value of _top which is more recent than the
 105 // time stamp. This is because a region may become a GC alloc region at
 106 // some point after we have read its time stamp as being < the current time stamp.
 107 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 108 // The current scheme, which uses sequential unsigned ints, will fail only if there are
 109 // about 4 billion evacuation pauses between two cleanups, which is _highly_ unlikely.
 110 class G1OffsetTableContigSpace: public CompactibleSpace {
 111   friend class VMStructs;
 112   HeapWord* _top;
 113   HeapWord* volatile _scan_top;
 114  protected:
 115   G1BlockOffsetArrayContigSpace _offsets;
 116   Mutex _par_alloc_lock;
 117   volatile unsigned _gc_time_stamp;
 118   // When we need to retire an allocation region, while other threads
 119   // are also concurrently trying to allocate into it, we typically
 120   // allocate a dummy object at the end of the region to ensure that
 121   // no more allocations can take place in it. However, sometimes we
 122   // want to know where the end of the last "real" object we allocated
 123   // into the region was, and that is what this field keeps track of.
 124   HeapWord* _pre_dummy_top;
 125 
 126  public:
 127   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 128                            MemRegion mr);
 129 
 130   void set_top(HeapWord* value) { _top = value; }
 131   HeapWord* top() const { return _top; }
 132 
 133  protected:
 134   // Reset the G1OffsetTableContigSpace.
 135   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 136 
 137   HeapWord** top_addr() { return &_top; }
 138   // Allocation helpers (return NULL if full).
 139   inline HeapWord* allocate_impl(size_t min_word_size, size_t* actual_word_size, HeapWord* end_value);
 140   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t* actual_word_size, HeapWord* end_value);

 141 
 142  public:
 143   void reset_after_compaction() { set_top(compaction_top()); }
 144 
 145   size_t used() const { return byte_size(bottom(), top()); }
 146   size_t free() const { return byte_size(top(), end()); }
 147   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 148 
 149   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 150 
 151   void object_iterate(ObjectClosure* blk);
 152   void safe_object_iterate(ObjectClosure* blk);
 153 
 154   void set_bottom(HeapWord* value);
 155   void set_end(HeapWord* value);
 156 
 157   void mangle_unused_area() PRODUCT_RETURN;
 158   void mangle_unused_area_complete() PRODUCT_RETURN;
 159 
 160   HeapWord* scan_top() const;


 164   void record_retained_region();
 165 
 166   // See the comment above in the declaration of _pre_dummy_top for an
 167   // explanation of what it is.
 168   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 169     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 170     _pre_dummy_top = pre_dummy_top;
 171   }
 172   HeapWord* pre_dummy_top() {
 173     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 174   }
 175   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 176 
 177   virtual void clear(bool mangle_space);
 178 
 179   HeapWord* block_start(const void* p);
 180   HeapWord* block_start_const(const void* p) const;
 181 
 182   // Allocation (return NULL if full).  Assumes the caller has established
 183   // mutually exclusive access to the space.
 184   HeapWord* allocate(size_t min_word_size, size_t* word_size);
 185   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
 186   HeapWord* par_allocate(size_t min_word_size, size_t* word_size);
 187 
 188   virtual HeapWord* allocate(size_t word_size);
 189   virtual HeapWord* par_allocate(size_t word_size);
 190 
 191   HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
 192 
 193   // MarkSweep support phase3
 194   virtual HeapWord* initialize_threshold();
 195   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 196 
 197   virtual void print() const;
 198 
 199   void reset_bot() {
 200     _offsets.reset_bot();
 201   }
 202 
 203   void print_bot_on(outputStream* out) {
 204     _offsets.print_on(out);
 205   }
 206 };
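
To make the time stamp protocol described above G1OffsetTableContigSpace concrete, here is a minimal sketch of how scan_top() could apply it. It is an illustration only, not the code under review, and it assumes that G1CollectedHeap::heap()->get_gc_time_stamp() returns the global time stamp that the per-region _gc_time_stamp is compared against.

HeapWord* G1OffsetTableContigSpace::scan_top() const {
  // Illustrative sketch only.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();   // read top() before the time stamp
  unsigned local_time_stamp = _gc_time_stamp;
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    // Not a current GC alloc region: everything below the top() we just read
    // was allocated before this GC and is safe to scan.
    return local_top;
  }
  // Current (possibly retained) GC alloc region: only the prefix recorded in
  // _scan_top at the end of the previous GC, or bottom(), is safe to scan.
  return _scan_top;
}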


 339   static size_t min_region_size_in_words();
 340 
 341   // Sets up the heap region size (GrainBytes / GrainWords), as
 342   // well as other related fields that are based on the heap region
 343   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
 344   // CardsPerRegion). All these fields are considered constant
 345   // throughout the JVM's execution, therefore they should be set
 346   // up only once, during initialization.
 347   static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
 348 
 349   // All allocated blocks are occupied by objects in a HeapRegion
 350   bool block_is_obj(const HeapWord* p) const;
 351 
 352   // Returns the object size for all valid block starts,
 353   // and the number of unallocated words if called on top().
 354   size_t block_size(const HeapWord* p) const;
 355 
 356   // Override for scan_and_forward support.
 357   void prepare_for_compaction(CompactPoint* cp);
 358 
 359   inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t* word_size);
 360   inline HeapWord* allocate_no_bot_updates(size_t word_size);
 361   inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t* word_size);
 362 
 363   // If this region is a member of a HeapRegionManager, the index in that
 364   // sequence, otherwise -1.
 365   uint hrm_index() const { return _hrm_index; }
 366 
 367   // The number of bytes marked live in the region in the last marking phase.
 368   size_t marked_bytes()    { return _prev_marked_bytes; }
 369   size_t live_bytes() {
 370     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 371   }
 372 
 373   // The number of bytes counted in the next marking.
 374   size_t next_marked_bytes() { return _next_marked_bytes; }
 375   // The number of bytes live wrt the next marking.
 376   size_t next_live_bytes() {
 377     return
 378       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 379   }
 380 
 381   // A lower bound on the amount of garbage bytes in the region.




  92 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
  93 // be reconciled, then G1OffsetTableContigSpace could go away.
  94 
  95 // The idea behind time stamps is the following. We want to keep track,
  96 // for each region, of the highest address up to which it is safe to scan objects.
  97 // This is only relevant for current GC alloc regions, so we keep a time stamp
  98 // per region to determine whether the region has been allocated into during
  99 // the current GC. If the time stamp is current, we report a scan_top value which
 100 // was saved at the end of the previous GC for retained alloc regions and which is
 101 // equal to the bottom for all other regions.
 102 // There is a race between card scanners and allocating GC workers: we must ensure
 103 // that card scanners do not read memory allocated by the GC workers.
 104 // To enforce that, we must not return a value of _top which is more recent than the
 105 // time stamp. This is because a region may become a GC alloc region at
 106 // some point after we have read its time stamp as being < the current time stamp.
 107 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 108 // The current scheme, which uses sequential unsigned ints, will fail only if there are
 109 // about 4 billion evacuation pauses between two cleanups, which is _highly_ unlikely.
 110 class G1OffsetTableContigSpace: public CompactibleSpace {
 111   friend class VMStructs;
 112   HeapWord* volatile _top;
 113   HeapWord* volatile _scan_top;
 114  protected:
 115   G1BlockOffsetArrayContigSpace _offsets;
 116   Mutex _par_alloc_lock;
 117   volatile unsigned _gc_time_stamp;
 118   // When we need to retire an allocation region, while other threads
 119   // are also concurrently trying to allocate into it, we typically
 120   // allocate a dummy object at the end of the region to ensure that
 121   // no more allocations can take place in it. However, sometimes we
 122   // want to know where the end of the last "real" object we allocated
 123   // into the region was, and that is what this field keeps track of.
 124   HeapWord* _pre_dummy_top;
 125 
 126  public:
 127   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 128                            MemRegion mr);
 129 
 130   void set_top(HeapWord* value) { _top = value; }
 131   HeapWord* top() const { return _top; }
 132 
 133  protected:
 134   // Reset the G1OffsetTableContigSpace.
 135   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 136 
 137   HeapWord* volatile* top_addr() { return &_top; }
 138   // Try to allocate at least min_word_size and up to desired_word_size from this Space.
 139   // Returns NULL if not possible, otherwise sets actual_word_size to the number of
 140   // words actually allocated.
 141   // This version assumes that all allocation requests to this Space are properly
 142   // synchronized.
 143   inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 144   // Try to allocate at least min_word_size and up to desired_word_size from this Space.
 145   // Returns NULL if not possible, otherwise sets actual_word_size to the number of
 146   // words actually allocated.
 147   // This version synchronizes with other calls to par_allocate_impl().
 148   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 149 
 150  public:
 151   void reset_after_compaction() { set_top(compaction_top()); }
 152 
 153   size_t used() const { return byte_size(bottom(), top()); }
 154   size_t free() const { return byte_size(top(), end()); }
 155   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 156 
 157   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 158 
 159   void object_iterate(ObjectClosure* blk);
 160   void safe_object_iterate(ObjectClosure* blk);
 161 
 162   void set_bottom(HeapWord* value);
 163   void set_end(HeapWord* value);
 164 
 165   void mangle_unused_area() PRODUCT_RETURN;
 166   void mangle_unused_area_complete() PRODUCT_RETURN;
 167 
 168   HeapWord* scan_top() const;


 172   void record_retained_region();
 173 
 174   // See the comment above in the declaration of _pre_dummy_top for an
 175   // explanation of what it is.
 176   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 177     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 178     _pre_dummy_top = pre_dummy_top;
 179   }
 180   HeapWord* pre_dummy_top() {
 181     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 182   }
 183   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 184 
 185   virtual void clear(bool mangle_space);
 186 
 187   HeapWord* block_start(const void* p);
 188   HeapWord* block_start_const(const void* p) const;
 189 
 190   // Allocation (return NULL if full).  Assumes the caller has established
 191   // mutually exclusive access to the space.
 192   HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 193   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
 194   HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 195 
 196   virtual HeapWord* allocate(size_t word_size);
 197   virtual HeapWord* par_allocate(size_t word_size);
 198 
 199   HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
 200 
 201   // MarkSweep support phase3
 202   virtual HeapWord* initialize_threshold();
 203   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 204 
 205   virtual void print() const;
 206 
 207   void reset_bot() {
 208     _offsets.reset_bot();
 209   }
 210 
 211   void print_bot_on(outputStream* out) {
 212     _offsets.print_on(out);
 213   }
 214 };
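
The new allocate_impl()/par_allocate_impl() contract documented above (grant at least min_word_size, at most desired_word_size, and report the grant through actual_word_size) can be pictured as a CAS loop over _top. The sketch below shows one possible shape of that loop, assuming the usual HotSpot helpers (pointer_delta, MIN2, Atomic::cmpxchg_ptr); it is not necessarily the code in heapRegion.inline.hpp that accompanies this change.

inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t min_word_size,
                                                             size_t desired_word_size,
                                                             size_t* actual_word_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);          // free words at the end
    // Hand out as much as possible, up to desired_word_size.
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate < min_word_size) {
      return NULL;                                          // not even the minimum fits
    }
    HeapWord* new_top = obj + want_to_allocate;
    HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, &_top, obj);
    if (result == obj) {
      // We installed new_top; report what was actually granted.
      *actual_word_size = want_to_allocate;
      return obj;
    }
    // Lost the race with another allocating worker; retry with the updated _top.
  } while (true);
}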


 347   static size_t min_region_size_in_words();
 348 
 349   // Sets up the heap region size (GrainBytes / GrainWords), as
 350   // well as other related fields that are based on the heap region
 351   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
 352   // CardsPerRegion). All these fields are considered constant
 353   // throughout the JVM's execution, therefore they should be set
 354   // up only once, during initialization.
 355   static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
 356 
 357   // All allocated blocks are occupied by objects in a HeapRegion
 358   bool block_is_obj(const HeapWord* p) const;
 359 
 360   // Returns the object size for all valid block starts,
 361   // and the number of unallocated words if called on top().
 362   size_t block_size(const HeapWord* p) const;
 363 
 364   // Override for scan_and_forward support.
 365   void prepare_for_compaction(CompactPoint* cp);
 366 
 367   inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 368   inline HeapWord* allocate_no_bot_updates(size_t word_size);
 369   inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 370 
 371   // If this region is a member of a HeapRegionManager, the index in that
 372   // sequence, otherwise -1.
 373   uint hrm_index() const { return _hrm_index; }
 374 
 375   // The number of bytes marked live in the region in the last marking phase.
 376   size_t marked_bytes()    { return _prev_marked_bytes; }
 377   size_t live_bytes() {
 378     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 379   }
 380 
 381   // The number of bytes counted in the next marking.
 382   size_t next_marked_bytes() { return _next_marked_bytes; }
 383   // The number of bytes live wrt the next marking.
 384   size_t next_live_bytes() {
 385     return
 386       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 387   }
 388 
 389   // A lower bound on the amount of garbage bytes in the region.
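
For context on how the flexible min/desired/actual interface is meant to be used (the point of the 8067336 change), the snippet below sketches a caller that refills a PLAB and accepts whatever the region can still hand out, as long as it meets the caller's minimum. Only par_allocate_no_bot_updates(min, desired, &actual) comes from the declarations above; the surrounding variable names and the set_buf() call are hypothetical placeholders, not APIs confirmed by this diff.

// Hypothetical caller-side sketch; everything except
// par_allocate_no_bot_updates() is illustrative.
size_t desired_words = plab_word_size;     // what we would like for the new PLAB
size_t min_words     = obj_word_size;      // smallest buffer the caller can still use
size_t actual_words  = 0;
HeapWord* buf = region->par_allocate_no_bot_updates(min_words, desired_words, &actual_words);
if (buf != NULL) {
  // The region granted between min_words and desired_words words; size the new
  // PLAB to what was actually obtained instead of failing the whole request.
  plab->set_buf(buf, actual_words);        // hypothetical PLAB helper
}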

