
src/share/vm/gc_implementation/g1/g1Allocator.hpp

rev 7471 : 8060025: Object copy time regressions after JDK-8031323 and JDK-8057536
Summary: Evaluate and improve object copy time by micro-optimizations and splitting out slow and fast paths aggressively.
Reviewed-by:
Contributed-by: Tony Printezis <tprintezis@twitter.com>, Thomas Schatzl <thomas.schatzl@oracle.com>
rev 7472 : [mq]: 8060025-mikael-review1

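Old version: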

 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"

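// Allocation purposes during GC: promotion to the old generation or
// copying into a survivor region.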
enum GCAllocPurpose {
  GCAllocForTenured,
  GCAllocForSurvived,
  GCAllocPurposeCount
};

// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

public:
  G1Allocator(G1CollectedHeap* heap) :
    _g1h(heap), _summary_bytes_used(0) { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;

// ... (remainder of G1Allocator and start of G1ParGCAllocBuffer elided) ...

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

class G1ParGCAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);

  virtual void retire_alloc_buffers() = 0;
  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;

public:
  G1ParGCAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
  }

  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste() { return _undo_waste; }

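  // Allocate word_sz words for purpose, first from the current alloc
  // buffer and, if that fails, via the slow path.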
  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
    HeapWord* obj = NULL;
    if (purpose == GCAllocForSurvived) {
      obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
    } else {
      obj = alloc_buffer(purpose, context)->allocate(word_sz);
    }
    if (obj != NULL) {
      return obj;
    }
    return allocate_slow(purpose, word_sz, context);
  }

  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(purpose, context)->contains(obj)) {
      assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};

class G1DefaultParGCAllocator : public G1ParGCAllocator {
  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];

public:
  G1DefaultParGCAllocator(G1CollectedHeap* g1h);

  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
    return _alloc_buffers[purpose];
  }

  virtual void retire_alloc_buffers();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

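New version: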

 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"

// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

public:
  G1Allocator(G1CollectedHeap* heap) :
    _g1h(heap), _summary_bytes_used(0) { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;

// ... (remainder of G1Allocator and start of G1ParGCAllocBuffer elided) ...

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

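// Per-thread allocator used during a GC pause: hands out space for
// evacuated objects, buffering allocations in per-destination PLABs.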
class G1ParGCAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

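  // Words wasted: space left unused in retired alloc buffers
  // (_alloc_buffer_waste) and space filled with dummy objects when an
  // allocation is undone outside a buffer (_undo_waste).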
  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  virtual void retire_alloc_buffers() = 0;
  virtual G1ParGCAllocBuffer* alloc_buffer(in_cset_state_t dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
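  // For example, assuming the default 8-byte ObjectAlignmentInBytes:
  // -XX:SurvivorAlignmentInBytes=32 yields 32, while a value of 8 yields 0,
  // keeping the common unaligned case on the compare-against-zero path.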
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }

public:
  G1ParGCAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
    _alloc_buffer_waste(0), _undo_waste(0) {
  }

  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste() { return _undo_waste; }

  // Allocate word_sz words in dest, either directly into a region or by
  // allocating a new PLAB. Returns the address of the allocated memory,
  // or NULL if not successful.
  HeapWord* allocate_direct_or_new_plab(in_cset_state_t dest,
                                        size_t word_sz,
                                        AllocationContext_t context);

  // Allocate word_sz words in the PLAB of dest. Returns the address of the
  // allocated memory, or NULL if not successful.
  HeapWord* plab_allocate(in_cset_state_t dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

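  // Fast path: try the current PLAB of dest first and only take the
  // out-of-line slow path (direct allocation or a new PLAB) on failure.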
  HeapWord* allocate(in_cset_state_t dest, size_t word_sz,
                     AllocationContext_t context) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context);
  }

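  // Undo the allocation of obj: if it came from the current PLAB of dest,
  // roll the buffer back; otherwise fill the space with a dummy object
  // and account for it as undo waste.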
  void undo_allocation(in_cset_state_t dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(dest, context)->contains(obj)) {
      assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};

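// Default implementation: one PLAB per destination (survivor and old),
// ignoring the allocation context.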
class G1DefaultParGCAllocator : public G1ParGCAllocator {
  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultParGCAllocator(G1CollectedHeap* g1h);

  virtual G1ParGCAllocBuffer* alloc_buffer(in_cset_state_t dest, AllocationContext_t context) {
    assert(dest < InCSetState::Num,
           err_msg("Allocation buffer index out-of-bounds: %d", dest));
    assert(_alloc_buffers[dest] != NULL,
           err_msg("Allocation buffer is NULL: %d", dest));
    return _alloc_buffers[dest];
  }

  virtual void retire_alloc_buffers();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
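
For reference, a caller in the evacuation copy path would combine the split
fast/slow entry points roughly as sketched below. This is not the actual
caller code from this change; dest, word_sz, context, old_obj and
handle_evacuation_failure() are stand-ins for the caller's state, and
_g1_par_allocator is assumed to be the thread's G1ParGCAllocator.

  // Inlined fast path: bump-pointer allocation out of the current PLAB.
  HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest, word_sz, context);
  if (obj_ptr == NULL) {
    // Out-of-line slow path: allocate directly in a region, or retire the
    // current PLAB and allocate a fresh one.
    obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest, word_sz, context);
  }
  if (obj_ptr == NULL) {
    // Evacuation failed; the caller must handle the failure
    // (hypothetical helper).
    return handle_evacuation_failure(old_obj);
  }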