/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1ALLOCATOR_HPP
#define SHARE_GC_G1_G1ALLOCATOR_HPP

#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.hpp"

class G1EvacuationInfo;
class G1NUMA;

// Interface to keep track of which regions G1 is currently allocating into. Provides
// some accessors (e.g. allocating into them, or getting their occupancy).
// Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;

private:
  G1CollectedHeap* _g1h;
  // NUMA support; used to map the current thread to a memory-node index
  // (see current_node_index()).
  G1NUMA* _numa;

  // "Out of space" flags for the GC allocation regions, queried/set via
  // survivor_is_full()/old_is_full() and set_survivor_full()/set_old_full().
  // NOTE(review): presumably reset at the start of each GC - confirm in g1Allocator.cpp.
  bool _survivor_is_full;
  bool _old_is_full;

  // The number of MutatorAllocRegions used, one per memory node.
  size_t _num_alloc_regions;

  // Alloc regions used to satisfy mutator allocation requests;
  // an array of _num_alloc_regions entries, one per memory node.
  MutatorAllocRegion* _mutator_alloc_regions;

  // Alloc regions used to satisfy allocation requests by the GC for
  // survivor objects; also one entry per memory node.
  SurvivorGCAllocRegion* _survivor_gc_alloc_regions;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects. Only one, regardless of NUMA nodes.
  OldGCAllocRegion _old_gc_alloc_region;

  // Old-gen allocation region kept across GCs so leftover space can be
  // reused by the next GC (see reuse_retained_old_region()).
  HeapRegion* _retained_old_gc_alloc_region;

  bool survivor_is_full() const;
  bool old_is_full() const;

  void set_survivor_full();
  void set_old_full();

  // Try to install the retained old region (if any) as the current old
  // GC alloc region instead of allocating a fresh one.
  void reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
                                 OldGCAllocRegion* old,
                                 HeapRegion** retained);

  // Accessors to the allocation regions.
  inline MutatorAllocRegion* mutator_alloc_region(uint node_index);
  inline SurvivorGCAllocRegion* survivor_gc_alloc_region(uint node_index);
  inline OldGCAllocRegion* old_gc_alloc_region();

  // Allocation attempt during GC for a survivor object / PLAB.
  // Tries to allocate between min_word_size and desired_word_size words;
  // the actually allocated size is returned in *actual_word_size.
  HeapWord* survivor_attempt_allocation(size_t min_word_size,
                                        size_t desired_word_size,
                                        size_t* actual_word_size,
                                        uint node_index);

  // Allocation attempt during GC for an old object / PLAB.
  HeapWord* old_attempt_allocation(size_t min_word_size,
                                   size_t desired_word_size,
                                   size_t* actual_word_size);

  // Node index of current thread.
  inline uint current_node_index() const;

public:
  G1Allocator(G1CollectedHeap* heap);
  ~G1Allocator();

  uint num_nodes() { return (uint)_num_alloc_regions; }

#ifdef ASSERT
  // Do we currently have an active mutator region to allocate into?
  bool has_mutator_alloc_region();
#endif

  void init_mutator_alloc_regions();
  void release_mutator_alloc_regions();

  void init_gc_alloc_regions(G1EvacuationInfo& evacuation_info);
  void release_gc_alloc_regions(G1EvacuationInfo& evacuation_info);
  void abandon_gc_alloc_regions();
  bool is_retained_old_region(HeapRegion* hr);

  // Allocate blocks of memory during mutator time.

  // Attempt allocation of between min_word_size and desired_word_size words
  // from the current mutator alloc region of this thread's node; the size
  // actually allocated is returned in *actual_word_size.
  inline HeapWord* attempt_allocation(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_word_size);
  inline HeapWord* attempt_allocation_locked(size_t word_size);
  inline HeapWord* attempt_allocation_force(size_t word_size);

  size_t unsafe_max_tlab_alloc();
  size_t used_in_alloc_regions();

  // Allocate blocks of memory during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
                                   size_t word_size,
                                   uint node_index);

  // As above, but allows the actual allocation to be between min_word_size
  // and desired_word_size words; the granted size is stored in *actual_word_size.
  HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
                                   size_t min_word_size,
                                   size_t desired_word_size,
                                   size_t* actual_word_size,
                                   uint node_index);
};

// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
private:
  typedef G1HeapRegionAttr::region_type_t region_type_t;

  G1CollectedHeap* _g1h;
  // Underlying region allocator used when a PLAB needs refilling.
  G1Allocator* _allocator;

  // Per destination (Old/Young/...) arrays of PLABs; the per-destination
  // array length is given by alloc_buffers_length().
  PLAB** _alloc_buffers[G1HeapRegionAttr::Num];

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

  // Number of words allocated directly (not counting PLAB allocation),
  // indexed by destination region type.
  size_t _direct_allocated[G1HeapRegionAttr::Num];

  // Retire all PLABs and merge their allocation/waste statistics into the heap's.
  void flush_and_retire_stats();
  inline PLAB* alloc_buffer(G1HeapRegionAttr dest, uint node_index) const;
  inline PLAB* alloc_buffer(region_type_t dest, uint node_index) const;

  // Returns the number of allocation buffers for the given dest.
  // There is only 1 buffer for Old while Young may have multiple buffers depending on
  // active NUMA nodes.
  inline uint alloc_buffers_length(region_type_t dest) const;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes();

  // Whether it pays off to discard the current (partially filled) buffer
  // to get a new one for an allocation of allocation_word_sz words.
  bool may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const;
public:
  G1PLABAllocator(G1Allocator* allocator);
  ~G1PLABAllocator();

  size_t waste() const;
  size_t undo_waste() const;

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful. Plab_refill_failed indicates whether an attempt to refill the
  // PLAB failed or not.
  HeapWord* allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                        size_t word_sz,
                                        bool* plab_refill_failed,
                                        uint node_index);

  // Allocate word_sz words in the PLAB of dest. Returns the address of the
  // allocated memory, NULL if not successful.
  inline HeapWord* plab_allocate(G1HeapRegionAttr dest,
                                 size_t word_sz,
                                 uint node_index);

  // Allocate word_sz words in dest, first trying the PLAB and falling back
  // to direct/new-PLAB allocation (allocate_direct_or_new_plab).
  inline HeapWord* allocate(G1HeapRegionAttr dest,
                            size_t word_sz,
                            bool* refill_failed,
                            uint node_index);

  // Undo (return) the most recent allocation of word_sz words at obj.
  void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index);
};

// G1ArchiveRegionMap is a boolean array used to mark G1 regions as
// archive regions.
// This allows a quick check for whether an object
// should not be marked because it is in an archive region.
class G1ArchiveRegionMap : public G1BiasedMappedArray<bool> {
protected:
  // Regions are non-archive unless explicitly marked.
  bool default_value() const { return false; }
};

// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not scavenged nor compacted by GC.
// There are two types of archive regions, which differ
// in the kind of references allowed for the contained objects:
//
// - 'Closed' archive regions contain no references outside of other
// closed archive regions. The region is immutable by GC. GC does
// not mark object headers in 'closed' archive regions.
// - An 'open' archive region allows references to any other regions,
// including closed archive, open archive and other java heap regions.
// GC can adjust pointers and mark object headers in 'open' archive regions.
class G1ArchiveAllocator : public CHeapObj<mtGC> {
protected:
  bool _open; // Indicate if the region is 'open' archive.
  G1CollectedHeap* _g1h;

  // The current allocation region
  HeapRegion* _allocation_region;

  // Regions allocated for the current archive range.
  GrowableArray<HeapRegion*> _allocated_regions;

  // The number of bytes used in the current range.
  size_t _summary_bytes_used;

  // Current allocation window within the current region.
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _max;

  // Allocate a new region for this archive allocator.
  // Allocation is from the top of the reserved heap downward.
  bool alloc_new_region();

public:
  G1ArchiveAllocator(G1CollectedHeap* g1h, bool open) :
    _open(open),
    _g1h(g1h),
    _allocation_region(NULL),
    // The comma expression first tags the embedded GrowableArray as
    // C-heap allocated (it is a member of a CHeapObj, not a ResourceObj),
    // then yields the initial capacity (2) for the constructor.
    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
                                                         ResourceObj::C_HEAP),
                        2), true /* C_Heap */),
    _summary_bytes_used(0),
    _bottom(NULL),
    _top(NULL),
    _max(NULL) { }

  virtual ~G1ArchiveAllocator() {
    assert(_allocation_region == NULL, "_allocation_region not NULL");
  }

  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h, bool open);

  // Allocate memory for an individual object.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Return the memory ranges used in the current archive, after
  // aligning to the requested alignment.
  void complete_archive(GrowableArray<MemRegion>* ranges,
                        size_t end_alignment_in_bytes);

  // The number of bytes allocated by this allocator.
  size_t used() {
    return _summary_bytes_used;
  }

  // Clear the count of bytes allocated in prior G1 regions. This
  // must be done when recalculate_use is used to reset the counter
  // for the generic allocator, since it counts bytes in all G1
  // regions, including those still associated with this allocator.
  void clear_used() {
    _summary_bytes_used = 0;
  }

  // Create the _archive_region_map which is used to identify archive objects.
  static inline void enable_archive_object_check();

  // Mark regions containing the specified address range as archive/non-archive.
  static inline void set_range_archive(MemRegion range, bool open);
  static inline void clear_range_archive(MemRegion range, bool open);

  // Check if the object is in closed archive
  static inline bool is_closed_archive_object(oop object);
  // Check if the object is in open archive
  static inline bool is_open_archive_object(oop object);
  // Check if the object is either in closed archive or open archive
  static inline bool is_archived_object(oop object);

private:
  static bool _archive_check_enabled;
  static G1ArchiveRegionMap _closed_archive_region_map;
  static G1ArchiveRegionMap _open_archive_region_map;

  // Check if an object is in a closed archive region using the _closed_archive_region_map.
  static inline bool in_closed_archive_range(oop object);
  // Check if an object is in open archive region using the _open_archive_region_map.
  static inline bool in_open_archive_range(oop object);

  // Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
  // unnecessarily.
  static inline bool archive_check_enabled();
};

#endif // SHARE_GC_G1_G1ALLOCATOR_HPP