/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "gc/shared/fill.hpp"
#include "utilities/align.hpp"

G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _g1h(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _mutator_alloc_region(),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)),
  _retained_old_gc_alloc_region(NULL) {
}

void G1Allocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1Allocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
  return _retained_old_gc_alloc_region == hr;
}

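// Decide whether the old GC alloc region retained from the previous
// collection can be reused as this collection's initial old GC alloc
// region, and install it in *old if so.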
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->in_initial_mark_gc();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint_on_vm_thread();

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1Allocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
                                         old_gc_alloc_region()->count());
  survivor_gc_alloc_region()->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}

void G1Allocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1Allocator::survivor_is_full() const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full() const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full() {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full() {
  _old_is_full = true;
}

size_t G1Allocator::unsafe_max_tlab_alloc() {
  // Return the remaining space in the current alloc region, but not less than
  // the minimum TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow TLABs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = mutator_alloc_region()->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

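// Return the number of bytes currently used in the mutator alloc region.
// The caller is expected to hold the Heap_lock, as asserted below.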
size_t G1Allocator::used_in_alloc_regions() {
  assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
  return mutator_alloc_region()->used_in_alloc_regions();
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
                                                                    desired_word_size,
                                                                    actual_word_size);
  if (result == NULL && !survivor_is_full()) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                                   desired_word_size,
                                                                   actual_word_size);
    if (result == NULL) {
      set_survivor_full();
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
                                                               desired_word_size,
                                                               actual_word_size);
  if (result == NULL && !old_is_full()) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                              desired_word_size,
                                                              actual_word_size);
    if (result == NULL) {
      set_old_full();
    }
  }
  return result;
}

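// Determine the object alignment, in bytes, to use for survivor space
// allocations. Returns 0 when objects in the survivors are not aligned any
// differently from ordinary objects, i.e. survivor alignment is not used.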
uint G1PLABAllocator::calc_survivor_alignment_bytes() {
  assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
  if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
    // No need to align objects in the survivors differently, return 0
    // which means "survivor alignment is not used".
    return 0;
  } else {
    assert(SurvivorAlignmentInBytes > 0, "sanity");
    return SurvivorAlignmentInBytes;
  }
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _direct_allocated[state] = 0;
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

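// Called when allocation into the current PLAB has failed: either retires
// the current PLAB, refills it and allocates from the new buffer, or
// allocates the object directly in the destination space, bypassing the
// PLAB. *plab_refill_failed is set if a refill was attempted but no space
// for a new buffer could be found.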
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = _g1h->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    PLAB* alloc_buf = alloc_buffer(dest);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                          word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise the PLAB refill failed.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz) {
  alloc_buffer(dest)->undo_allocation(obj, word_sz);
}

void G1PLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

void G1PLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

bool G1ArchiveAllocator::_archive_check_enabled = false;
G1ArchiveRegionMap G1ArchiveAllocator::_closed_archive_region_map;
G1ArchiveRegionMap G1ArchiveAllocator::_open_archive_region_map;

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
  enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the archive set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  if (_open) {
    hr->set_open_archive();
  } else {
    hr->set_closed_archive();
  }
  _g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
  _g1h->archive_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);

  // Since we've modified the archive set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

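// Allocate word_size words for an archive object, opening a new archive
// region when the current one is exhausted. Returns NULL if no free region
// is left in the heap.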
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < Fill::min_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      Fill::range(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

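// Finish archive allocation: pad the final region up to the requested end
// alignment, then record the allocated address ranges as MemRegions in the
// caller-supplied GrowableArray, combining contiguous ranges.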
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < Fill::min_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // Fill::min_size() at the top of the current allocation region.
        newtop = align_up(currtop + Fill::min_size(), end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      Fill::range(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}
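
// A rough sketch of the expected call sequence (illustrative only; the
// actual call sites live in the heap archiving (CDS) code, and the local
// names below are made up for this example):
//
//   G1ArchiveAllocator* archive_alloc =
//     G1ArchiveAllocator::create_allocator(g1h, false /* open */);
//   HeapWord* mem = archive_alloc->archive_mem_allocate(word_size);
//   ...
//   GrowableArray<MemRegion> ranges(2);
//   archive_alloc->complete_archive(&ranges, end_alignment_in_bytes);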