src/share/vm/gc_implementation/g1/g1Allocator.hpp (original version)

 130   }
 131 
 132   virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
 133     return &_old_gc_alloc_region;
 134   }
 135 
 136   virtual size_t used() {
 137     assert(Heap_lock->owner() != NULL,
 138            "Should be owned on this thread's behalf.");
 139     size_t result = _summary_bytes_used;
 140 
 141     // Read only once in case it is set to NULL concurrently
 142     HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
 143     if (hr != NULL) {
 144       result += hr->used();
 145     }
 146     return result;
 147   }
 148 };
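
A note on the `used()` implementation above: the mutator alloc region can be retired (and its slot cleared) by another thread while `used()` runs, so the region pointer is loaded into a local exactly once. A minimal illustration of the race that the "Read only once" comment guards against; `slot` and both functions below are hypothetical stand-ins, not HotSpot code:

    // Illustrative only. Assume another thread may store NULL into 'slot'
    // at any moment (as happens when the alloc region is retired).
    HeapRegion* volatile slot = NULL;

    size_t used_safe(size_t summary_bytes) {
      HeapRegion* hr = slot;                    // single load
      return summary_bytes + (hr != NULL ? hr->used() : 0);
    }

    size_t used_racy(size_t summary_bytes) {
      if (slot != NULL) {                       // load #1 sees a region
        return summary_bytes + slot->used();    // load #2 may see NULL: crash
      }
      return summary_bytes;
    }
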
 149
 150 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
 151 private:
 152   bool _retired;
 153 
 154 public:
 155   G1ParGCAllocBuffer(size_t gclab_word_size);
 156   virtual ~G1ParGCAllocBuffer() {
 157     guarantee(_retired, "Allocation buffer has not been retired");
 158   }
 159 
 160   virtual void set_buf(HeapWord* buf) {
 161     ParGCAllocBuffer::set_buf(buf);
 162     _retired = false;
 163   }
 164 
 165   virtual void retire() {
 166     if (_retired) {
 167       return;
 168     }
 169     ParGCAllocBuffer::retire();
 170     _retired = true;
 171   }
 172 };
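
The `_retired` flag above makes `retire()` idempotent and lets the destructor verify that no buffer is dropped while it still holds unretired, unparseable space. A hedged lifecycle sketch: `acquire_chunk` is a hypothetical memory source, and the constructor is assumed to start the buffer in the retired state so an unused buffer destructs cleanly:

    G1ParGCAllocBuffer plab(word_size);          // assumed to construct retired
    HeapWord* chunk = acquire_chunk(word_size);  // hypothetical refill source
    plab.set_buf(chunk);                         // now live: _retired = false
    HeapWord* obj = plab.allocate(obj_words);    // bump-pointer allocation
    // ... use obj ...
    plab.retire();                               // pads the unused tail
    plab.retire();                               // no-op thanks to _retired
    // the destructor's guarantee(_retired, ...) now passes
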
 173 
 174 class G1ParGCAllocator : public CHeapObj<mtGC> {
 175   friend class G1ParScanThreadState;
 176 protected:
 177   G1CollectedHeap* _g1h;
 178 
 179   // The survivor alignment in effect in bytes.
 180   // == 0 : don't align survivors
 181   // != 0 : align survivors to that alignment
 182   // These values were chosen to favor the non-alignment case since some
 183   // architectures have special compare-against-zero instructions.
 184   const uint _survivor_alignment_bytes;
 185 
 186   size_t _alloc_buffer_waste;
 187   size_t _undo_waste;
 188 
 189   void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
 190   void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 191 
 192   virtual void retire_alloc_buffers() = 0;
 193   virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
 194 
 195   // Calculate the survivor space object alignment in bytes. Returns that value,
 196   // or 0 if there are no restrictions on survivor alignment.
 197   static uint calc_survivor_alignment_bytes() {
 198     assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
 199     if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
 200       // No need to align objects in the survivors differently, return 0
 201       // which means "survivor alignment is not used".
 202       return 0;
 203     } else {
 204       assert(SurvivorAlignmentInBytes > 0, "sanity");
 205       return SurvivorAlignmentInBytes;
 206     }
 207   }
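
The zero/non-zero convention above is consumed in `plab_allocate()` below: zero selects the plain `allocate()` path, anything else the aligned one. The padding arithmetic behind an aligned bump-pointer allocation looks roughly like this (a self-contained sketch, not the `ParGCAllocBuffer::allocate_aligned` source):

    #include <cstdint>
    #include <cstddef>

    // Round 'p' up to a power-of-two 'alignment'.
    static inline uintptr_t align_up(uintptr_t p, size_t alignment) {
      return (p + alignment - 1) & ~(uintptr_t)(alignment - 1);
    }

    // Returns the allocation address, or 0 if the padded request does not fit.
    // 'top'/'end' stand for the PLAB's fill pointer and limit.
    uintptr_t allocate_aligned(uintptr_t& top, uintptr_t end,
                               size_t size_bytes, size_t alignment_bytes) {
      uintptr_t obj = align_up(top, alignment_bytes);
      if (obj + size_bytes > end) {
        return 0;              // caller refills the PLAB or allocates directly
      }
      // In the VM the gap [top, obj) is filled with a dummy object so the
      // heap stays parseable; elided here.
      top = obj + size_bytes;
      return obj;
    }
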
 208 
 209 public:
 210   G1ParGCAllocator(G1CollectedHeap* g1h) :
 211     _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
 212     _alloc_buffer_waste(0), _undo_waste(0) {
 213   }
 214 
 215   static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
 216 
 217   size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
 218   size_t undo_waste() { return _undo_waste; }
 219 
 220   // Allocate word_sz words in dest, either directly into the regions or by
 221   // allocating a new PLAB. Returns the address of the allocated memory, NULL if
 222   // not successful.
 223   HeapWord* allocate_direct_or_new_plab(InCSetState dest,
 224                                         size_t word_sz,
 225                                         AllocationContext_t context);
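
The declaration above hides the interesting policy decision: a request that is small relative to a PLAB justifies retiring the current buffer and starting a fresh one, while a large request goes straight to the regions so it does not consume most of a new buffer by itself. A sketch of that shape, hedged: the helpers `desired_plab_sz` and `par_allocate_during_gc` are assumptions about the surrounding G1CollectedHeap API, not quoted source:

    HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                            size_t word_sz,
                                                            AllocationContext_t context) {
      size_t plab_word_size = _g1h->desired_plab_sz(dest);      // assumed helper
      // Refill only when the request is a small fraction of a PLAB; otherwise a
      // fresh buffer would be mostly consumed (or wasted) by this one object.
      if (word_sz * 100 < plab_word_size * ParallelGCBufferWastePct) {
        G1ParGCAllocBuffer* buf = alloc_buffer(dest, context);
        add_to_alloc_buffer_waste(buf->words_remaining());      // unused tail is waste
        buf->retire();
        HeapWord* plab = _g1h->par_allocate_during_gc(dest, plab_word_size, context);
        if (plab == NULL) {
          return NULL;                  // caller handles evacuation failure
        }
        buf->set_buf(plab);
        return buf->allocate(word_sz);  // fits: word_sz is far below plab_word_size
      } else {
        // Large request: allocate it directly in the destination regions.
        return _g1h->par_allocate_during_gc(dest, word_sz, context);
      }
    }
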
 226 
 227   // Allocate word_sz words in the PLAB of dest.  Returns the address of the
 228   // allocated memory, NULL if not successful.
 229   HeapWord* plab_allocate(InCSetState dest,
 230                           size_t word_sz,
 231                           AllocationContext_t context) {
 232     G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
 233     if (_survivor_alignment_bytes == 0) {
 234       return buffer->allocate(word_sz);
 235     } else {
 236       return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
 237     }
 238   }
 239 
 240   HeapWord* allocate(InCSetState dest, size_t word_sz,
 241                      AllocationContext_t context) {
 242     HeapWord* const obj = plab_allocate(dest, word_sz, context);
 243     if (obj != NULL) {
 244       return obj;
 245     }
 246     return allocate_direct_or_new_plab(dest, word_sz, context);
 247   }
 248 
 249   void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
 250     if (alloc_buffer(dest, context)->contains(obj)) {
 251       assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
 252              "should contain whole object");
 253       alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
 254     } else {
 255       CollectedHeap::fill_with_object(obj, word_sz);
 256       add_to_undo_waste(word_sz);
 257     }
 258   }
 259 };
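
`undo_allocation()` exists because evacuation is speculative: a worker claims space for its copy, then races other workers to install the forwarding pointer in the old object; the losers hand the claimed space back (or, on the non-PLAB path above, turn it into a filler object and count it as undo waste). A schematic of that pattern, loosely following G1ParScanThreadState::copy_to_survivor_space, with error paths elided:

    HeapWord* obj_ptr = allocator->allocate(dest, word_sz, context);
    if (obj_ptr == NULL) {
      // Evacuation failure handling, elided.
    }
    oop obj = oop(obj_ptr);
    oop forward_ptr = old->forward_to_atomic(obj);   // CAS: race to claim 'old'
    if (forward_ptr == NULL) {
      // This worker won the race: perform the actual copy.
      Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
      return obj;
    } else {
      // Another worker forwarded 'old' first; our claimed space is unused.
      allocator->undo_allocation(dest, obj_ptr, word_sz, context);
      return forward_ptr;
    }
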
 260 
 261 class G1DefaultParGCAllocator : public G1ParGCAllocator {
 262   G1ParGCAllocBuffer  _surviving_alloc_buffer;
 263   G1ParGCAllocBuffer  _tenured_alloc_buffer;
 264   G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
 265 
 266 public:
 267   G1DefaultParGCAllocator(G1CollectedHeap* g1h);
 268 
 269   virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
 270     assert(dest.is_valid(),
 271            err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
 272     assert(_alloc_buffers[dest.value()] != NULL,
 273            err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
 274     return _alloc_buffers[dest.value()];
 275   }
 276 
 277   virtual void retire_alloc_buffers();
 278 };
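
`alloc_buffer()` above is a plain table lookup, so the constructor carries the real wiring: every InCSetState slot starts out NULL, and only the young and old destinations point at the two embedded buffers, which is exactly what the NULL assert above checks. A sketch of that constructor; the `desired_plab_sz` sizing calls are assumptions:

    G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
        G1ParGCAllocator(g1h),
        _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),  // assumed
        _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {     // assumed
      for (uint state = 0; state < InCSetState::Num; state++) {
        _alloc_buffers[state] = NULL;          // NotInCSet etc. stay unmapped
      }
      _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
      _alloc_buffers[InCSetState::Old]   = &_tenured_alloc_buffer;
    }
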
 279 
 280 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

src/share/vm/gc_implementation/g1/g1Allocator.hpp (patched version)

 130   }
 131 
 132   virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
 133     return &_old_gc_alloc_region;
 134   }
 135 
 136   virtual size_t used() {
 137     assert(Heap_lock->owner() != NULL,
 138            "Should be owned on this thread's behalf.");
 139     size_t result = _summary_bytes_used;
 140 
 141     // Read only once in case it is set to NULL concurrently
 142     HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
 143     if (hr != NULL) {
 144       result += hr->used();
 145     }
 146     return result;
 147   }
 148 };
 149 
 150 class G1PLABWasteStat VALUE_OBJ_CLASS_SPEC {
 151 private:
 152   size_t _wasted;
 153   size_t _undo_wasted;
 154 public:
 155 
 156   G1PLABWasteStat() : _wasted(0), _undo_wasted(0) {
 157   }
 158 
 159   void add_wasted(size_t word_sz) {
 160     _wasted += word_sz;
 161   }
 162 
 163   void add_undo_wasted(size_t word_sz) {
 164     _undo_wasted += word_sz;
 165   }
 166 
 167   size_t wasted() {
 168     return _wasted;
 169   }
 170 
 171   size_t undo_wasted() {
 172     return _undo_wasted;
 173   }
 174 };
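
This value class replaces the raw `_alloc_buffer_waste`/`_undo_waste` counters of the original version; with `undo_allocation()` now delegating entirely to the buffer (see below), the per-buffer objects presumably track their own waste and the allocator's `wasted()` merely aggregates them. A hedged sketch of what the default allocator's override might look like; the per-buffer `wasted()`/`undo_wasted()` accessors are assumptions about this patch, not quoted source:

    G1PLABWasteStat G1DefaultParGCAllocator::wasted() {
      G1PLABWasteStat result;
      for (uint state = 0; state < InCSetState::Num; state++) {
        G1ParGCAllocBuffer* buf = _alloc_buffers[state];
        if (buf != NULL) {
          result.add_wasted(buf->wasted());            // assumed accessor
          result.add_undo_wasted(buf->undo_wasted());  // assumed accessor
        }
      }
      return result;
    }
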
 175 
 176 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
 177 private:
 178   bool _retired;
 179 
 180 public:
 181   G1ParGCAllocBuffer(size_t gclab_word_size);
 182   virtual ~G1ParGCAllocBuffer() {
 183     guarantee(_retired, "Allocation buffer has not been retired");
 184   }
 185 
 186   virtual void set_buf(HeapWord* buf) {
 187     ParGCAllocBuffer::set_buf(buf);
 188     _retired = false;
 189   }
 190 
 191   virtual void retire() {
 192     if (_retired) {
 193       return;
 194     }
 195     ParGCAllocBuffer::retire();
 196     _retired = true;
 197   }
 198 };
 199 
 200 class G1ParGCAllocator : public CHeapObj<mtGC> {
 201   friend class G1ParScanThreadState;
 202 protected:
 203   G1CollectedHeap* _g1h;
 204 
 205   // The survivor alignment in effect in bytes.
 206   // == 0 : don't align survivors
 207   // != 0 : align survivors to that alignment
 208   // These values were chosen to favor the non-alignment case since some
 209   // architectures have special compare-against-zero instructions.
 210   const uint _survivor_alignment_bytes;
 211
 212   virtual void retire_alloc_buffers() = 0;
 213   virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
 214 
 215   // Calculate the survivor space object alignment in bytes. Returns that value,
 216   // or 0 if there are no restrictions on survivor alignment.
 217   static uint calc_survivor_alignment_bytes() {
 218     assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
 219     if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
 220       // No need to align objects in the survivors differently, return 0
 221       // which means "survivor alignment is not used".
 222       return 0;
 223     } else {
 224       assert(SurvivorAlignmentInBytes > 0, "sanity");
 225       return SurvivorAlignmentInBytes;
 226     }
 227   }
 228 
 229 public:
 230   G1ParGCAllocator(G1CollectedHeap* g1h) :
 231     _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
 232   }
 233 
 234   static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
 235 
 236   virtual G1PLABWasteStat wasted() = 0;
 237
 238   // Allocate word_sz words in dest, either directly into the regions or by
 239   // allocating a new PLAB. Returns the address of the allocated memory, NULL if
 240   // not successful.
 241   HeapWord* allocate_direct_or_new_plab(InCSetState dest,
 242                                         size_t word_sz,
 243                                         AllocationContext_t context);
 244 
 245   // Allocate word_sz words in the PLAB of dest.  Returns the address of the
 246   // allocated memory, NULL if not successful.
 247   HeapWord* plab_allocate(InCSetState dest,
 248                           size_t word_sz,
 249                           AllocationContext_t context) {
 250     G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
 251     if (_survivor_alignment_bytes == 0) {
 252       return buffer->allocate(word_sz);
 253     } else {
 254       return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
 255     }
 256   }
 257 
 258   HeapWord* allocate(InCSetState dest, size_t word_sz,
 259                      AllocationContext_t context) {
 260     HeapWord* const obj = plab_allocate(dest, word_sz, context);
 261     if (obj != NULL) {
 262       return obj;
 263     }
 264     return allocate_direct_or_new_plab(dest, word_sz, context);
 265   }
 266 
 267   void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
 268     alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
 269   }
 270 };
 271 
 272 class G1DefaultParGCAllocator : public G1ParGCAllocator {
 273   G1ParGCAllocBuffer  _surviving_alloc_buffer;
 274   G1ParGCAllocBuffer  _tenured_alloc_buffer;
 275   G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
 276 
 277 public:
 278   G1DefaultParGCAllocator(G1CollectedHeap* g1h);
 279 
 280   virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
 281     assert(dest.is_valid(),
 282            err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
 283     assert(_alloc_buffers[dest.value()] != NULL,
 284            err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
 285     return _alloc_buffers[dest.value()];
 286   }
 287 
 288   virtual void retire_alloc_buffers();
 289 
 290   virtual G1PLABWasteStat wasted();
 291 };
 292 
 293 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP