
src/share/vm/gc/g1/g1Allocator.hpp

rev 8817 : [mq]: jon-review-statistics


// ... (earlier lines elided in this diff hunk)
};

// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap);

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  // ... (lines elided in this diff hunk)

    // Read only once in case it is set to NULL concurrently.
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

class G1PLAB: public PLAB {
private:
  bool _retired;

public:
  G1PLAB(size_t gclab_word_size);
  virtual ~G1PLAB() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  // The amount of space in words wasted within the PLAB including
  // waste due to refills and alignment.
  size_t wasted() const { return _wasted; }

  virtual void set_buf(HeapWord* buf) {
    PLAB::set_buf(buf);
    _retired = false;
  }

  virtual void retire() {
    if (_retired) {
      return;
    }
    PLAB::retire();
    _retired = true;
  }

  virtual void flush_and_retire_stats(PLABStats* stats) {
    PLAB::flush_and_retire_stats(stats);
    _retired = true;
  }
};
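
// A usage sketch (not from this file): the retire protocol the _retired flag
// above enforces. The free-function form and the buffer source are
// hypothetical; in the VM the buffer words come from a GC alloc region.
inline void g1_plab_retire_protocol_sketch(G1PLAB& plab, HeapWord* buf) {
  plab.set_buf(buf);   // un-retires the PLAB (_retired = false)
  // ... carve objects out of the buffer via PLAB::allocate(word_sz) ...
  plab.retire();       // first call retires the buffer and updates counters
  plab.retire();       // idempotent: the second call returns immediately
  // When 'plab' is destroyed, guarantee(_retired, ...) now passes.
}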

// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area, and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;
  G1Allocator* _allocator;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

  // Number of words allocated directly (not counting PLAB allocation).
  size_t _direct_allocated[InCSetState::Num];
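
  // Sketch (an assumption about the matching .cpp, not shown in this hunk):
  // when an allocation bypasses the PLAB, the counter above is expected to be
  // bumped per destination, along the lines of:
  //   _direct_allocated[dest.value()] += word_sz;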

  virtual void flush_and_retire_stats() = 0;
  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }
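
  // Sketch (an assumption, not part of this hunk): how a PLAB allocation path
  // can consume _survivor_alignment_bytes so that the == 0 case stays a single
  // test. PLAB::allocate() and PLAB::allocate_aligned() are base-class calls.
  HeapWord* plab_allocate_sketch(InCSetState dest, size_t word_sz,
                                 AllocationContext_t context) {
    G1PLAB* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
      return buffer->allocate(word_sz);  // fast path: no survivor alignment
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }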

  HeapWord* allocate_new_plab(InCSetState dest,
                              size_t word_sz,
                              AllocationContext_t context);

  // ... (lines elided in this diff hunk)
};

// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
  G1PLAB  _surviving_alloc_buffer;
  G1PLAB  _tenured_alloc_buffer;
  G1PLAB* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultPLABAllocator(G1Allocator* _allocator);

  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual void flush_and_retire_stats();

  virtual void waste(size_t& wasted, size_t& undo_wasted);
};
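
// Sketch (an assumption about the matching .cpp, not shown here): the
// constructor is expected to wire the state-indexed table to the two PLAB
// members, e.g.
//   _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
//   _alloc_buffers[InCSetState::Old]   = &_tenured_alloc_buffer;
// so that alloc_buffer() above is a single bounds-checked table lookup.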

// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not modifiable by GC, being neither
// scavenged nor compacted, nor even marked in the object header.
// They can contain no pointers to non-archive heap regions.
class G1ArchiveAllocator : public CHeapObj<mtGC> {

protected:
  G1CollectedHeap* _g1h;

  // The current allocation region.
  HeapRegion* _allocation_region;

  // Regions allocated for the current archive range.
  GrowableArray<HeapRegion*> _allocated_regions;

  // The number of bytes used in the current range.
