< prev index next >

src/share/vm/gc_implementation/g1/g1Allocator.hpp

Print this page
rev 7471 : 8060025: Object copy time regressions after JDK-8031323 and JDK-8057536
Summary: Evaluate and improve object copy time by micro-optimizations and splitting out slow and fast paths aggressively.
Reviewed-by:
Contributed-by: Tony Printezis <tprintezis@twitter.com>, Thomas Schatzl <thomas.schatzl@oracle.com>


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
  27 
  28 #include "gc_implementation/g1/g1AllocationContext.hpp"
  29 #include "gc_implementation/g1/g1AllocRegion.hpp"
  30 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  31 
// Purpose tags for per-thread GC allocation buffers (PLABs). Used to select
// and index the per-purpose buffers (see G1DefaultParGCAllocator::_alloc_buffers).
enum GCAllocPurpose {
  GCAllocForTenured,   // Copying an object into the old (tenured) generation.
  GCAllocForSurvived,  // Copying an object into a survivor region.
  GCAllocPurposeCount  // Number of purposes; also the buffer array length.
};
  37 
  38 // Base class for G1 allocators.
  39 class G1Allocator : public CHeapObj<mtGC> {
  40   friend class VMStructs;
  41 protected:
  42   G1CollectedHeap* _g1h;
  43 
  44   // Outside of GC pauses, the number of bytes used in all regions other
  45   // than the current allocation region.
  46   size_t _summary_bytes_used;
  47 
  48 public:
  49    G1Allocator(G1CollectedHeap* heap) :
  50      _g1h(heap), _summary_bytes_used(0) { }
  51 
  52    static G1Allocator* create_allocator(G1CollectedHeap* g1h);
  53 
  54    virtual void init_mutator_alloc_region() = 0;
  55    virtual void release_mutator_alloc_region() = 0;


 161 
  // Install a new buffer and clear the retired flag so that a subsequent
  // retire() call actually retires this buffer.
  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }
 166 
 167   virtual void retire(bool end_of_gc, bool retain) {
 168     if (_retired) {
 169       return;
 170     }
 171     ParGCAllocBuffer::retire(end_of_gc, retain);
 172     _retired = true;
 173   }
 174 };
 175 
// Per-worker-thread allocator used during GC pauses to place copied objects.
// Tracks space wasted by retired buffers and by undone allocations, and
// provides the allocation fast path (allocate()) backed by a slow path that
// may acquire a new buffer (allocate_slow()).
class G1ParGCAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  size_t _alloc_buffer_waste;  // Bytes left unused in buffers when retired.
  size_t _undo_waste;          // Bytes plugged with filler objects by undo_allocation().

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  // Slow path: used when the current buffer for the given purpose cannot
  // satisfy the request. Returns NULL on failure.
  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);

  virtual void retire_alloc_buffers() = 0;
  // Returns the allocation buffer for the given purpose and allocation context.
  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;

public:
  G1ParGCAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
  }

  // Factory selecting the concrete allocator implementation.
  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste() {return _undo_waste; }

  // Allocate word_sz words for the given purpose, first from the current
  // buffer, then via the slow path. Returns NULL if both fail.
  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
    HeapWord* obj = NULL;
    if (purpose == GCAllocForSurvived) {
      // Survivor-bound objects are aligned to SurvivorAlignmentInBytes.
      obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
    } else {
      obj = alloc_buffer(purpose, context)->allocate(word_sz);
    }
    if (obj != NULL) {
      return obj;
    }
    return allocate_slow(purpose, word_sz, context);
  }

  // Undo the most recent allocation of word_sz words at obj. If obj still
  // lies in the current buffer the buffer is rolled back; otherwise the hole
  // is plugged with a filler object and counted as undo waste.
  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(purpose, context)->contains(obj)) {
      assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};
 226 
 227 class G1DefaultParGCAllocator : public G1ParGCAllocator {
 228   G1ParGCAllocBuffer  _surviving_alloc_buffer;
 229   G1ParGCAllocBuffer  _tenured_alloc_buffer;
 230   G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
 231 
 232 public:
 233   G1DefaultParGCAllocator(G1CollectedHeap* g1h);
 234 
 235   virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
 236     return _alloc_buffers[purpose];




 237   }
 238 
 239   virtual void retire_alloc_buffers() ;
 240 };
 241 
 242 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
  27 
  28 #include "gc_implementation/g1/g1AllocationContext.hpp"
  29 #include "gc_implementation/g1/g1AllocRegion.hpp"
  30 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  31 
// Compact per-region collection-set state; negative values denote humongous
// regions (see InCSetState below).
typedef int8_t in_cset_state_t;

// Helper class used to examine in_cset_state_t values.
class InCSetState : AllStatic {
public:
  enum {
    // Values <0 mean the region is a humongous region.
    NotInCSet    = 0,     // The region is not in the collection set.
    Young        = 1,     // The region is in the collection set and a young region.
    Old          = 2,     // The region is in the collection set and an old region.
    Num
  };

  // Sentinel value used for humongous regions.
  static in_cset_state_t humongous() { return -1; }

  static bool is_not_in_cset(in_cset_state_t state) { return state == NotInCSet; }
  static bool is_in_cset_or_humongous(in_cset_state_t state) { return state != NotInCSet; }
  static bool is_in_cset(in_cset_state_t state) { return state > NotInCSet; }
  static bool is_humongous(in_cset_state_t state) { return state < NotInCSet; }
};
  52 
  53 // Base class for G1 allocators.
  54 class G1Allocator : public CHeapObj<mtGC> {
  55   friend class VMStructs;
  56 protected:
  57   G1CollectedHeap* _g1h;
  58 
  59   // Outside of GC pauses, the number of bytes used in all regions other
  60   // than the current allocation region.
  61   size_t _summary_bytes_used;
  62 
  63 public:
  64    G1Allocator(G1CollectedHeap* heap) :
  65      _g1h(heap), _summary_bytes_used(0) { }
  66 
  67    static G1Allocator* create_allocator(G1CollectedHeap* g1h);
  68 
  69    virtual void init_mutator_alloc_region() = 0;
  70    virtual void release_mutator_alloc_region() = 0;


 176 
  // Install a new buffer and clear the retired flag so that a subsequent
  // retire() call actually retires this buffer.
  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }
 181 
 182   virtual void retire(bool end_of_gc, bool retain) {
 183     if (_retired) {
 184       return;
 185     }
 186     ParGCAllocBuffer::retire(end_of_gc, retain);
 187     _retired = true;
 188   }
 189 };
 190 
// Per-worker-thread allocator used during GC pauses to place copied objects.
// Tracks space wasted by retired buffers and by undone allocations, and
// splits the object copy allocation into a PLAB fast path (plab_allocate())
// and a slow path (allocate_direct_or_new_plab()).
class G1ParGCAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have a special compare-against-zero instruction.
  const uint _survivor_alignment_bytes;

  size_t _alloc_buffer_waste;  // Bytes left unused in buffers when retired.
  size_t _undo_waste;          // Bytes plugged with filler objects by undo_allocation().

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  virtual void retire_alloc_buffers() = 0;
  // Returns the allocation buffer for the given destination state and context.
  virtual G1ParGCAllocBuffer* alloc_buffer(in_cset_state_t dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }

public:
  G1ParGCAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
    _alloc_buffer_waste(0), _undo_waste(0) {
  }

  // Factory selecting the concrete allocator implementation.
  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste() {return _undo_waste; }

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful.
  HeapWord* allocate_direct_or_new_plab(in_cset_state_t dest,
                                        size_t word_sz,
                                        AllocationContext_t context);

  // Allocate word_sz words in the PLAB of dest.  Returns the address of the
  // allocated memory, NULL if not successful.
  HeapWord* plab_allocate(in_cset_state_t dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

  // Fast path first (PLAB), then the direct/new-PLAB slow path.
  HeapWord* allocate(in_cset_state_t dest, size_t word_sz,
                     AllocationContext_t context) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context);
  }

  // Undo the most recent allocation of word_sz words at obj. If obj still
  // lies in the current buffer the buffer is rolled back; otherwise the hole
  // is plugged with a filler object and counted as undo waste.
  void undo_allocation(in_cset_state_t dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(dest, context)->contains(obj)) {
      assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};
 277 
// Default G1ParGCAllocator: one PLAB per destination state up to
// InCSetState::Num, selected by the dest value.
class G1DefaultParGCAllocator : public G1ParGCAllocator {
  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultParGCAllocator(G1CollectedHeap* g1h);

  // Returns the buffer for the given destination state; the allocation
  // context is not used by this allocator. Asserts (debug builds) that dest
  // indexes a valid, initialized buffer before dereferencing.
  virtual G1ParGCAllocBuffer* alloc_buffer(in_cset_state_t dest, AllocationContext_t context) {
    assert(dest < InCSetState::Num,
           err_msg("Allocation buffer index out-of-bounds: %d", dest));
    assert(_alloc_buffers[dest] != NULL,
           err_msg("Allocation buffer is NULL: %d", dest));
    return _alloc_buffers[dest];
  }

  virtual void retire_alloc_buffers() ;
};
 296 
 297 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
< prev index next >