/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->_old_set.remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->_hr_printer.reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }

  if (ResizePLAB) {
    _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz();
    _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz();
  }
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less than
  // the minimum TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow TLABs to grow big enough to accommodate
  // humongous objects.
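  // Illustrative example (numbers are hypothetical, not taken from this code):
  // with 8 MB regions the humongous threshold, and hence max_tlab below, is
  // roughly 4 MB. A mutator region with 6 MB free therefore still reports
  // about 4 MB here, while a nearly full region reports at least MinTLABSize.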

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                           false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                          false /* bot_updates */);
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                      true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                     true /* bot_updates */);
  }
  return result;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
}

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context) {
  size_t gclab_word_size = _g1h->desired_plab_sz(dest);
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
    if (buf == NULL) {
      return NULL; // Let caller handle allocation failure.
    }
    // Otherwise.
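    // Install the new buffer as the current PLAB for this destination and
    // satisfy the original request from it. The size check above guarantees
    // that word_sz fits in a fresh buffer of gclab_word_size words (assuming
    // ParallelGCBufferWastePct <= 100), which is what the assert below relies on.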
    alloc_buf->set_word_size(gclab_word_size);
    alloc_buf->set_buf(buf);

    HeapWord* const obj = alloc_buf->allocate(word_sz);
    assert(obj != NULL, "buffer was definitely big enough...");
    return obj;
  } else {
    return _allocator->par_allocate_during_gc(dest, word_sz, context);
  }
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

void G1DefaultPLABAllocator::retire_alloc_buffers() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
  hr->set_archive();
  _g1h->_old_set.add(hr);
  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));

  // Since we've modified the old set, call update_sizes.
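  // (update_sizes() refreshes G1's monitoring support, so the memory pool and
  //  performance counter values account for the region just moved into the
  //  old set.)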
  _g1h->g1mm()->update_sizes();
  return true;
}

HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
                 p2i(_bottom), p2i(_allocation_region->bottom())));
  assert(_max <= _allocation_region->end(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
                 p2i(_max), p2i(_allocation_region->end())));
  assert(_bottom <= old_top && old_top <= _max,
         err_msg("inconsistent allocation state: expected "
                 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
                 p2i(_bottom), p2i(old_top), p2i(_max)));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
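        // (Bumping the base to currtop + min_fill_size before aligning up
        //  guarantees fill_size >= min_fill_size, so the filler objects
        //  allocated below are always representable.)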
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         err_msg("expected region %u at end of array, found %u",
                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}