/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
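
// An outline of the retained-region mechanism implemented below (this
// comment only summarizes the code in this file): at the end of an
// evacuation pause, release_gc_alloc_regions() saves the current old GC
// alloc region in _retained_old_gc_alloc_region, and at the start of the
// next pause reuse_retained_old_region() re-installs it as the initial old
// alloc region, unless one of the disqualifying conditions listed in that
// function applies. Reusing the region lets consecutive pauses keep filling
// a partially used old region instead of abandoning its free tail.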

void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  G1Allocator::init_gc_alloc_regions(evacuation_info);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less
  // than the minimum TLAB size.

  // Also, this value can be at most the humongous object threshold, since
  // we can't allow TLABs to grow big enough to accommodate humongous
  // objects.
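  //
  // Worked example with assumed (not default) values: for a region with
  // 64 KB free, a MinTLABSize of 2 KB and a humongous-object cap of 512 KB,
  // this returns MIN2(MAX2(64 KB, 2 KB), 512 KB) = 64 KB. If the region had
  // only 1 KB left, the MAX2 clamp would raise the answer to the 2 KB
  // minimum instead of reporting an unusably small value.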

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

bool G1Allocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}
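
// The two allocation paths below share a two-step pattern: a lock-free
// attempt on the current GC alloc region first and, only if that fails, a
// retry under the FreeList_lock that may also install a fresh region. If
// even the locked attempt fails, the destination is marked full so that
// later requests skip the lock entirely. The bot_updates arguments differ
// deliberately: survivor regions are young and do not need block offset
// table updates (false), while old regions must keep it up to date (true).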

HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           desired_word_size,
                                                                           actual_word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          desired_word_size,
                                                                          actual_word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      desired_word_size,
                                                                      actual_word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     desired_word_size,
                                                                     actual_word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  _survivor_is_full = false;
  _old_is_full = false;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}
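
// Worked example for the check above, assuming the default
// ParallelGCBufferWastePct of 10: with a desired PLAB size of 1024 words, a
// buffer may only be retired for requests of fewer than 1024 * 10 / 100 =
// 102.4 words, i.e. at most 102 words. Since a refill is only attempted
// after such a request failed to fit in the current buffer, the space
// discarded by retiring that buffer is below the same bound, so no more
// than roughly 10% of a PLAB is ever thrown away.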

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       context);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
             word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise the PLAB refill failed; record that and fall through to
    // direct allocation.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

void G1DefaultPLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr, G1HRPrinter::Archive);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}
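
// archive_mem_allocate() below carves each archive region into
// min_region_size'd chunks. A sketch with assumed numbers: if 8 words remain
// in the current chunk (_bottom to _max) and a 16-word request arrives, the
// 8-word tail is plugged with a filler object, top is advanced to _max, and
// the request is satisfied at the base of the next chunk. Filling the tail
// is always possible because the code never leaves a gap smaller than
// CollectedHeap::min_fill_size() at the top of a chunk.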

HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}
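
// complete_archive() below optionally pads the allocated range up to a
// requested alignment. Illustrative arithmetic (assumed values, 64-bit VM
// with a 16-byte minimum filler): if top is 8 bytes short of a 4 KB boundary
// and end_alignment_in_bytes is 4096, the 8-byte gap is too small for a
// filler object, so the target is bumped to the next 4 KB boundary past
// top + min_fill_size(), and the resulting 4104-byte (8 + 4096) gap is
// filled instead.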

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}
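
// A sketch of the intended calling sequence for G1ArchiveAllocator, inferred
// from the functions above (the actual call sites live outside this file):
//
//   G1ArchiveAllocator* alloc = G1ArchiveAllocator::create_allocator(g1h);
//   HeapWord* mem = alloc->archive_mem_allocate(word_size); // repeated
//   GrowableArray<MemRegion> ranges(2);
//   alloc->complete_archive(&ranges, end_alignment_in_bytes);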