/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "utilities/align.hpp"

G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _g1h(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _mutator_alloc_region(),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
  _retained_old_gc_alloc_region(NULL),
  _retained_survivor_gc_alloc_region(NULL) {
}
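
// G1Allocator manages one mutator alloc region for application allocation plus
// two GC alloc regions (survivor and old) used during evacuation. The
// _retained_* fields carry a partially used region across pauses so that its
// remaining space can be reused rather than wasted; see
// init_mutator_alloc_region() and release_gc_alloc_regions() below.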

void G1Allocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();

  reuse_retained_survivor_region(&_retained_survivor_gc_alloc_region);
}

void G1Allocator::reuse_retained_survivor_region(HeapRegion** retained_survivor) {
  HeapRegion* retained_region = *retained_survivor;
  *retained_survivor = NULL;

  if (retained_region != NULL) {
    mutator_alloc_region()->set(retained_region);
    _g1h->reuse_retained_survivor_region(retained_region);
  }
}

void G1Allocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
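
// A rough sketch of the mutator alloc region lifecycle (the actual call sites
// live in G1CollectedHeap and may differ in detail):
//
//   _allocator->release_mutator_alloc_region();  // at the start of a pause
//   // ... evacuation ...
//   _allocator->init_mutator_alloc_region();     // at the end of the pause;
//                                                // may install a retained
//                                                // survivor region (see above)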

bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
  return _retained_old_gc_alloc_region == hr;
}

void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  assert_at_safepoint_on_vm_thread();

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
                                         old_gc_alloc_region()->count());
  HeapRegion* reuse_candidate = survivor_gc_alloc_region()->release();
  if (reuse_candidate != NULL && SurvivorGCAllocRegion::should_retain(reuse_candidate)) {
    _g1h->update_retained_survivor_gc_alloc_region(reuse_candidate);
    _retained_survivor_gc_alloc_region = reuse_candidate;
  }

  // If we have an old GC alloc region to release, save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // becomes NULL. This is what we want either way, so there is no need to
  // check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}
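
// Retention summary: a survivor region that SurvivorGCAllocRegion::should_retain()
// deems worth keeping is stashed in _retained_survivor_gc_alloc_region and handed
// to the mutator alloc region by the next init_mutator_alloc_region(); a partially
// filled old region is stashed in _retained_old_gc_alloc_region and reused as the
// old GC alloc region of the next evacuation via reuse_retained_old_region().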

void G1Allocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1Allocator::survivor_is_full() const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full() const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full() {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full() {
  _old_is_full = true;
}

size_t G1Allocator::unsafe_max_tlab_alloc() {
  // Return the remaining space in the current alloc region, but not less
  // than the minimum TLAB size.

  // Also, this value can be at most the humongous object threshold, since we
  // can't allow TLABs to grow big enough to accommodate humongous objects.
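
  // For example (illustrative numbers, assuming 1M regions, so the humongous
  // object threshold and hence max_tlab is about half a region, i.e. ~512K):
  // a current region with 64K free reports 64K, a nearly full one still
  // reports at least MinTLABSize, and a (nearly) empty one is capped at ~512K.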

  HeapRegion* hr = mutator_alloc_region()->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

size_t G1Allocator::used_in_alloc_regions() {
  assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
  return mutator_alloc_region()->used_in_alloc_regions();
}

HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t word_size) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    case G1HeapRegionAttr::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}
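
// The three-argument overload implements a flexible-size contract: callers ask
// for at least min_word_size words, prefer desired_word_size, and receive the
// size actually handed out through *actual_word_size. A sketch of a typical
// call, mirroring the PLAB refill path below (sizes are illustrative):
//
//   size_t actual = 0;
//   HeapWord* buf = _allocator->par_allocate_during_gc(dest,
//                                                      required_in_plab,  // min
//                                                      plab_word_size,    // desired
//                                                      &actual);
//   // On success, required_in_plab <= actual <= plab_word_size.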

HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
                                                                    desired_word_size,
                                                                    actual_word_size);
  if (result == NULL && !survivor_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                                   desired_word_size,
                                                                   actual_word_size);
    if (result == NULL) {
      set_survivor_full();
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
                                                               desired_word_size,
                                                               actual_word_size);
  if (result == NULL && !old_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                              desired_word_size,
                                                              actual_word_size);
    if (result == NULL) {
      set_old_full();
    }
  }
  return result;
}

uint G1PLABAllocator::calc_survivor_alignment_bytes() {
  assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
  if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
    // No need to align objects in the survivors differently, return 0
    // which means "survivor alignment is not used".
    return 0;
  } else {
    assert(SurvivorAlignmentInBytes > 0, "sanity");
    return SurvivorAlignmentInBytes;
  }
}
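
// For example, running with -XX:SurvivorAlignmentInBytes=64 while
// ObjectAlignmentInBytes is the default 8 makes this return 64, so survivor
// PLAB allocations are aligned to 64-byte boundaries; with the two values
// equal (the default) it returns 0 and no extra alignment is applied.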

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Old)),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    _direct_allocated[state] = 0;
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[G1HeapRegionAttr::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[G1HeapRegionAttr::Old] = &_tenured_alloc_buffer;
}

bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}
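
// In other words, the current buffer may only be thrown away if the incoming
// allocation is smaller than ParallelGCBufferWastePct percent of a full buffer.
// For example (illustrative sizes): with a 4096-word PLAB and the default
// ParallelGCBufferWastePct of 10, only requests of fewer than ~410 words
// justify retiring the buffer and fetching a new one; larger requests fall
// through to direct allocation in allocate_direct_or_new_plab() below.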

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                       size_t word_sz,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = _g1h->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    PLAB* alloc_buf = alloc_buffer(dest);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                          word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise, report that refilling the PLAB failed.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
  if (result != NULL) {
    _direct_allocated[dest.type()] += word_sz;
  }
  return result;
}
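
// The above is the PLAB slow path. The inline fast path (G1PLABAllocator::allocate
// in g1Allocator.inline.hpp) is expected to bump-allocate from the current PLAB
// first and only fall back here when that fails, roughly:
//
//   HeapWord* obj = plab_allocate(dest, word_sz);
//   if (obj != NULL) {
//     return obj;
//   }
//   return allocate_direct_or_new_plab(dest, word_sz, refill_failed);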

void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz) {
  alloc_buffer(dest)->undo_allocation(obj, word_sz);
}

void G1PLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

size_t G1PLABAllocator::waste() const {
  size_t result = 0;
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      result += buf->waste();
    }
  }
  return result;
}

size_t G1PLABAllocator::undo_waste() const {
  size_t result = 0;
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      result += buf->undo_waste();
    }
  }
  return result;
}

bool G1ArchiveAllocator::_archive_check_enabled = false;
G1ArchiveRegionMap G1ArchiveAllocator::_closed_archive_region_map;
G1ArchiveRegionMap G1ArchiveAllocator::_open_archive_region_map;

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
  enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the archive set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  if (_open) {
    hr->set_open_archive();
  } else {
    hr->set_closed_archive();
  }
  _g1h->policy()->remset_tracker()->update_at_allocate(hr);
  _g1h->archive_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);

  // Since we've modified the archive set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
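  //
  // For example (illustrative numbers): if only 1024 words remain below _max
  // and 1500 words are requested, the 1024-word gap is filled with a filler
  // object, top is bumped to _max, and the 1500 words are allocated at the
  // base of the next chunk. A request that fits but would leave fewer than
  // CollectedHeap::min_fill_size() words below _max is moved to the next
  // chunk in the same way, so no unfillable sliver is ever left behind.
  //
  // Note that remainder below is only meaningful (and, due to short-circuit
  // evaluation, only read) when new_top < _max.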
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
                          end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
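  //
  // Regions were obtained via alloc_highest_free_region(), so later entries in
  // _allocated_regions sit at lower addresses. Walking the array from the end
  // therefore visits regions in ascending address order, which lets contiguous
  // regions be merged into a single MemRegion.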
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}