/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "utilities/align.hpp"

G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
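    // Reuse it: note_start_of_copying() below updates the region's
    // top-at-mark-start bookkeeping (it matters whether this pause also
    // starts an initial mark), and evacuation_info records how much of the
    // region was already in use before this evacuation.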
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->in_initial_mark_gc();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint_on_vm_thread();

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
                                         old_gc_alloc_region()->count());
  survivor_gc_alloc_region()->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1DefaultAllocator::survivor_is_full() const {
  return _survivor_is_full;
}

bool G1DefaultAllocator::old_is_full() const {
  return _old_is_full;
}

void G1DefaultAllocator::set_survivor_full() {
  _survivor_is_full = true;
}

void G1DefaultAllocator::set_old_full() {
  _old_is_full = true;
}

size_t G1Allocator::unsafe_max_tlab_alloc() {
  // Return the remaining space in the current alloc region, but not less
  // than the minimum TLAB size.

  // Also, this value can be at most the humongous object threshold, since
  // we can't allow TLABs to grow big enough to accommodate humongous
  // objects.
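  //
  // For example, with 1M heap regions the humongous threshold is half a
  // region, so the value returned here is capped at 512K regardless of how
  // much of the current alloc region is actually free. The MAX2 below also
  // means we never report less than MinTLABSize, even if the region has
  // less than that left; a TLAB request of that size will then fail in this
  // region and force it to be retired for a fresh one.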

  HeapRegion* hr = mutator_alloc_region()->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
                                                                    desired_word_size,
                                                                    actual_word_size);
  if (result == NULL && !survivor_is_full()) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                                   desired_word_size,
                                                                   actual_word_size);
    if (result == NULL) {
      set_survivor_full();
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
                                                               desired_word_size,
                                                               actual_word_size);
  if (result == NULL && !old_is_full()) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                              desired_word_size,
                                                              actual_word_size);
    if (result == NULL) {
      set_old_full();
    }
  }
  return result;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
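  // E.g. with a desired PLAB size of 4096 words and the default
  // ParallelGCBufferWastePct of 10, the existing buffer is only retired for
  // a new one when the allocation needs fewer than ~410 words, i.e. when at
  // most ~10% of a fresh buffer would be given up as waste.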
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    PLAB* alloc_buf = alloc_buffer(dest);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
             SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
             word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise the PLAB refill failed.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz) {
  alloc_buffer(dest)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

void G1DefaultPLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

bool G1ArchiveAllocator::_archive_check_enabled = false;
G1ArchiveRegionMap G1ArchiveAllocator::_closed_archive_region_map;
G1ArchiveRegionMap G1ArchiveAllocator::_open_archive_region_map;

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
  enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
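  // Taking the highest free region keeps archive regions grouped together
  // at the top of the heap, out of the way of regions handed out for normal
  // allocation, which generally come from the low end of the free list.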
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  if (_open) {
    hr->set_open_archive();
  } else {
    hr->set_closed_archive();
  }
  _g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
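    // At this point old_top == _bottom == _max. Either advance _max by one
    // more min_region_size'd chunk within the same region, or, if the region
    // is exhausted, restart in a fresh region (alloc_new_region() resets
    // _bottom and _max).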
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the
        // current region boundary because the max supported alignment is
        // smaller than the min region size, and because the allocation code
        // never leaves space smaller than min_fill_size at the top of the
        // current allocation region.
        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
                          end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}