/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "utilities/align.hpp"

G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _g1h(heap),
  _numa(heap->numa()),
  _survivor_is_full(false),
  _old_is_full(false),
  _num_alloc_regions(_numa->num_active_nodes()),
  _mutator_alloc_regions(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
  _retained_old_gc_alloc_region(NULL) {

  _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC);
  for (uint i = 0; i < _num_alloc_regions; i++) {
    ::new(_mutator_alloc_regions + i) MutatorAllocRegion(i);
  }
}

G1Allocator::~G1Allocator() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    _mutator_alloc_regions[i].~MutatorAllocRegion();
  }
  FREE_C_HEAP_ARRAY(MutatorAllocRegion, _mutator_alloc_regions);
}

#ifdef ASSERT
bool G1Allocator::has_mutator_alloc_region() {
  uint node_index = current_node_index();
  return mutator_alloc_region(node_index)->get() != NULL;
}
#endif

void G1Allocator::init_mutator_alloc_regions() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    assert(mutator_alloc_region(i)->get() == NULL, "pre-condition");
    mutator_alloc_region(i)->init();
  }
}

void G1Allocator::release_mutator_alloc_regions() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    mutator_alloc_region(i)->release();
    assert(mutator_alloc_region(i)->get() == NULL, "post-condition");
  }
}

bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
  return _retained_old_gc_alloc_region == hr;
}
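
// Decide whether the old GC alloc region retained from the previous
// evacuation can be reused for the current one. If it is usable, it is
// removed from the old region set and installed as the current old GC
// alloc region; otherwise it is discarded and the old GC alloc region
// starts out without a region.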
void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)",
         retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  //    a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  //    during a cleanup and was added to the free list, but
  //    has been subsequently used to allocate a humongous
  //    object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  assert_at_safepoint_on_vm_thread();

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
                                         old_gc_alloc_region()->count());
  survivor_gc_alloc_region()->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}

void G1Allocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1Allocator::survivor_is_full() const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full() const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full() {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full() {
  _old_is_full = true;
}
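
// Illustration (numbers are examples only): with 2M heap regions the
// humongous object threshold, and hence max_tlab, is about half a region,
// i.e. 1M of space. An alloc region with 64K free then yields a 64K TLAB
// size, while an almost-full region is rounded up to MinTLABSize so
// callers never see a degenerate TLAB size.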
size_t G1Allocator::unsafe_max_tlab_alloc() {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  uint node_index = current_node_index();
  HeapRegion* hr = mutator_alloc_region(node_index)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

size_t G1Allocator::used_in_alloc_regions() {
  assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
  size_t used = 0;
  for (uint i = 0; i < _num_alloc_regions; i++) {
    used += mutator_alloc_region(i)->used_in_alloc_regions();
  }
  return used;
}

HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t word_size) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    case G1HeapRegionAttr::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}
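
// Both allocation paths below use the same two-level scheme: first try a
// lock-free bump allocation in the current GC alloc region, and only if
// that fails take FreeList_lock and retry, possibly retiring the region
// and claiming a new one. A NULL result from the locked attempt means the
// corresponding generation is out of regions for this evacuation, which
// is latched in _survivor_is_full/_old_is_full to avoid repeated retries.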
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
                                                                    desired_word_size,
                                                                    actual_word_size);
  if (result == NULL && !survivor_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                                   desired_word_size,
                                                                   actual_word_size);
    if (result == NULL) {
      set_survivor_full();
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
                                                               desired_word_size,
                                                               actual_word_size);
  if (result == NULL && !old_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                              desired_word_size,
                                                              actual_word_size);
    if (result == NULL) {
      set_old_full();
    }
  }
  return result;
}

uint G1PLABAllocator::calc_survivor_alignment_bytes() {
  assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
  if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
    // No need to align objects in the survivors differently, return 0
    // which means "survivor alignment is not used".
    return 0;
  } else {
    assert(SurvivorAlignmentInBytes > 0, "sanity");
    return SurvivorAlignmentInBytes;
  }
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Old)),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    _direct_allocated[state] = 0;
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[G1HeapRegionAttr::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[G1HeapRegionAttr::Old] = &_tenured_alloc_buffer;
}
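
// A PLAB is only worth retiring ("throwing away") for an allocation that
// is small relative to the buffer, i.e. when the unused space being wasted
// stays below ParallelGCBufferWastePct percent of a full buffer.
// Illustration (sizes are examples only): with a 1024-word PLAB and the
// default ParallelGCBufferWastePct of 10, only requests needing fewer
// than ~102 words justify retiring the current buffer and refilling.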
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                       size_t word_sz,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = _g1h->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    PLAB* alloc_buf = alloc_buffer(dest);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
             word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
  if (result != NULL) {
    _direct_allocated[dest.type()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz) {
  alloc_buffer(dest)->undo_allocation(obj, word_sz);
}

void G1PLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

size_t G1PLABAllocator::waste() const {
  size_t result = 0;
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      result += buf->waste();
    }
  }
  return result;
}

size_t G1PLABAllocator::undo_waste() const {
  size_t result = 0;
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      result += buf->undo_waste();
    }
  }
  return result;
}
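
// Support for archive (CDS) heap regions. Objects in "closed" archive
// regions are expected to be immutable and are never marked or moved by
// the collector, while "open" archive regions may reference objects
// elsewhere in the heap. The two range maps below let the marking code
// classify an address as closed/open archive cheaply.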
bool G1ArchiveAllocator::_archive_check_enabled = false;
G1ArchiveRegionMap G1ArchiveAllocator::_closed_archive_region_map;
G1ArchiveRegionMap G1ArchiveAllocator::_open_archive_region_map;

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
  enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the archive set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  if (_open) {
    hr->set_open_archive();
  } else {
    hr->set_closed_archive();
  }
  _g1h->policy()->remset_tracker()->update_at_allocate(hr);
  _g1h->archive_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);

  // Since we've modified the region sets, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}
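
// Allocate the next word_size words of archive space. The space is handed
// out in min_region_size'd chunks: an allocation never straddles a chunk
// boundary, and any tail too small to use is plugged with filler objects
// to keep the region contents parseable. A new chunk (or, when the current
// region is exhausted, a new region) is claimed as needed.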
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
                          end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}