/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_G1_G1ALLOCATOR_HPP

#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.hpp"

class EvacuationInfo;

// Interface to keep track of which regions G1 is currently allocating into.
// Provides accessors to these regions (e.g. for allocating into them, or
// getting their occupancy).
// Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
private:
  bool _survivor_is_full;
  bool _old_is_full;
protected:
  G1CollectedHeap* _g1h;

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;

  virtual bool survivor_is_full(AllocationContext_t context) const;
  virtual bool old_is_full(AllocationContext_t context) const;

  virtual void set_survivor_full(AllocationContext_t context);
  virtual void set_old_full(AllocationContext_t context);

  // Accessors to the allocation regions.
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size,
                                               AllocationContext_t context);
  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size,
                                          AllocationContext_t context);
public:
  G1Allocator(G1CollectedHeap* heap) : _survivor_is_full(false), _old_is_full(false), _g1h(heap) { }
  virtual ~G1Allocator() { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

#ifdef ASSERT
  // Do we currently have an active mutator region to allocate into?
  bool has_mutator_alloc_region(AllocationContext_t context) { return mutator_alloc_region(context)->get() != NULL; }
#endif
  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  // Management of retained regions.

  virtual bool is_retained_old_region(HeapRegion* hr) = 0;
  void reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                 OldGCAllocRegion* old,
                                 HeapRegion** retained);

  // Allocate blocks of memory during mutator time.

  inline HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
  inline HeapWord* attempt_allocation_locked(size_t word_size, AllocationContext_t context);
  inline HeapWord* attempt_allocation_force(size_t word_size, AllocationContext_t context);

  size_t unsafe_max_tlab_alloc(AllocationContext_t context);

  // Allocate blocks of memory during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(InCSetState dest,
                                   size_t word_size,
                                   AllocationContext_t context);

  virtual size_t used_in_alloc_regions() = 0;
};
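// An illustrative sketch (not part of this interface) of how a mutator-time
// allocation might drive the methods above. It assumes an accessor such as
// G1CollectedHeap::allocator() returning the G1Allocator; `g1h`, `word_size`
// and the accessor are hypothetical names for the example only.
//
//   AllocationContext_t ctx = AllocationContext::current();
//   G1Allocator* alloc = g1h->allocator();          // hypothetical accessor
//   HeapWord* result = alloc->attempt_allocation(word_size, ctx);
//   if (result == NULL) {
//     MutexLockerEx ml(Heap_lock);                  // retry under the Heap_lock
//     result = alloc->attempt_allocation_locked(word_size, ctx);
//   }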
// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used_in_alloc_regions() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = 0;

    // Read only once in case it is set to NULL concurrently.
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

// A PLAB that tracks whether it has been retired: retiring twice is a no-op,
// and destroying an unretired buffer trips the guarantee in the destructor.
class G1PLAB: public PLAB {
private:
  bool _retired;

public:
  G1PLAB(size_t gclab_word_size);
  virtual ~G1PLAB() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    PLAB::set_buf(buf);
    _retired = false;
  }

  virtual void retire() {
    if (_retired) {
      return;
    }
    PLAB::retire();
    _retired = true;
  }

  virtual void flush_and_retire_stats(PLABStats* stats) {
    PLAB::flush_and_retire_stats(stats);
    _retired = true;
  }
};
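// An illustrative sketch of the G1PLAB lifecycle, assuming backing storage of
// `plab_words` HeapWords has been obtained elsewhere (all names below are
// hypothetical, for the example only):
//
//   G1PLAB plab(plab_words);
//   plab.set_buf(buffer_bottom);       // install storage; clears _retired
//   HeapWord* obj = plab.allocate(obj_words);
//   ...
//   plab.retire();                     // must run before the destructor,
//                                      // otherwise its guarantee() fails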
// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;
  G1Allocator* _allocator;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

  virtual void retire_alloc_buffers() = 0;
  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }

  HeapWord* allocate_new_plab(InCSetState dest,
                              size_t word_sz,
                              AllocationContext_t context);

public:
  G1PLABAllocator(G1Allocator* allocator);
  virtual ~G1PLABAllocator() { }

  static G1PLABAllocator* create_allocator(G1Allocator* allocator);

  virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful. The out parameter plab_refill_failed indicates whether an
  // attempt to refill the PLAB failed or not.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context,
                                        bool* plab_refill_failed);

  // Allocate word_sz words in the PLAB of dest. Returns the address of the
  // allocated memory, NULL if not successful.
  HeapWord* plab_allocate(InCSetState dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1PLAB* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

  // Fast path: try the current PLAB first; fall back to direct or
  // new-PLAB allocation only if that fails.
  HeapWord* allocate(InCSetState dest,
                     size_t word_sz,
                     AllocationContext_t context,
                     bool* refill_failed) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context, refill_failed);
  }

  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
};
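// An illustrative sketch of how a GC worker might copy an object through the
// interface above (the real caller is G1ParScanThreadState; the variable
// names here are hypothetical):
//
//   bool plab_refill_failed = false;
//   HeapWord* dst = plab_alloc->allocate(dest_state, word_sz, context,
//                                        &plab_refill_failed);
//   if (dst == NULL) {
//     // Neither the current PLAB, a refilled PLAB, nor a direct region
//     // allocation could satisfy the request; evacuating this object fails,
//     // and plab_refill_failed records whether a PLAB refill was attempted
//     // and failed.
//   } else if (lost_copy_race) {
//     // Another worker installed the forwardee first - return the space.
//     plab_alloc->undo_allocation(dest_state, dst, word_sz, context);
//   }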
// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
  G1PLAB  _surviving_alloc_buffer;
  G1PLAB  _tenured_alloc_buffer;
  G1PLAB* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultPLABAllocator(G1Allocator* allocator);

  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual void retire_alloc_buffers();

  virtual void waste(size_t& wasted, size_t& undo_wasted);
};

// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not modifiable by GC, being neither
// scavenged nor compacted, nor even marked in the object header.
// They can contain no pointers to non-archive heap regions.
class G1ArchiveAllocator : public CHeapObj<mtGC> {
protected:
  G1CollectedHeap* _g1h;

  // The current allocation region.
  HeapRegion* _allocation_region;

  // Regions allocated for the current archive range.
  GrowableArray<HeapRegion*> _allocated_regions;

  // The number of bytes used in the current range.
  size_t _summary_bytes_used;

  // Current allocation window within the current region.
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _max;

  // Allocate a new region for this archive allocator.
  // Allocation is from the top of the reserved heap downward.
  bool alloc_new_region();

public:
  G1ArchiveAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h),
    _allocation_region(NULL),
    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
                                                         ResourceObj::C_HEAP),
                        2), true /* C_Heap */),
    _summary_bytes_used(0),
    _bottom(NULL),
    _top(NULL),
    _max(NULL) { }

  virtual ~G1ArchiveAllocator() {
    assert(_allocation_region == NULL, "_allocation_region not NULL");
  }

  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);

  // Allocate memory for an individual object.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Return the memory ranges used in the current archive, after
  // aligning to the requested alignment.
  void complete_archive(GrowableArray<MemRegion>* ranges,
                        size_t end_alignment_in_bytes);

  // The number of bytes allocated by this allocator.
  size_t used() {
    return _summary_bytes_used;
  }

  // Clear the count of bytes allocated in prior G1 regions. This
  // must be done when recalculate_used() is used to reset the counter
  // for the generic allocator, since it counts bytes in all G1
  // regions, including those still associated with this allocator.
  void clear_used() {
    _summary_bytes_used = 0;
  }
};

#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP