/*
 * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

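// Set up and tear down the single mutator alloc region that ordinary
// (non-GC) object and TLAB allocations are satisfied from.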
void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

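// Called at the start of an evacuation pause: reset the "space is full"
// flags, (re)initialize the survivor and old GC alloc regions, and try to
// reuse the old region retained from the previous pause.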
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

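// Called at the end of an evacuation pause: record how many GC alloc regions
// were used, release them, and keep the old GC alloc region (if any) as the
// retained region for the next pause.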
void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no need to
  // check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1DefaultAllocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1DefaultAllocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1DefaultAllocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1DefaultAllocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}

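// A new G1PLAB starts out retired, i.e. without a backing buffer; a buffer is
// only installed later via set_buf().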
G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less than
  // the minimum TLAB size.

  // Also, this value can be at most the humongous object threshold, since we
  // can't allow TLABs to grow big enough to accommodate humongous objects.

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

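// Allocate exactly word_size words during GC; fails (returns NULL) rather
// than return a block of a different size.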
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

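// Allocate between min_word_size and desired_word_size words during GC,
// dispatching to the survivor or old space depending on the destination
// state. The actual size allocated is returned in *actual_word_size.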
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

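// First try a lock-free allocation in the current survivor GC alloc region;
// if that fails and survivor space is not yet known to be full, retry under
// the FreeList_lock, which may retire the region and get a new one. A failure
// under the lock marks survivor space as full so later attempts can skip the
// locked retry.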
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           desired_word_size,
                                                                           actual_word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          desired_word_size,
                                                                          actual_word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

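// Same two-step (lock-free, then locked) scheme as survivor_attempt_allocation,
// but for old regions, which additionally require BOT updates.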
HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      desired_word_size,
                                                                      actual_word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     desired_word_size,
                                                                     actual_word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

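// Returns true if the current buffer may be retired (thrown away) for an
// allocation of allocation_word_sz words: since the allocation did not fit in
// the buffer, its remaining free space is smaller than allocation_word_sz, so
// retiring it wastes at most that much, which is acceptable as long as it is
// below ParallelGCBufferWastePct percent of a full buffer of buffer_size words.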
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

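// Called when allocation out of the current PLAB has failed: either retire it
// and install a newly allocated PLAB, or, if refilling is not worthwhile or
// fails, allocate the object directly outside of a PLAB and account for it in
// _direct_allocated.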
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       context);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                          word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise the PLAB refill failed; record that and fall through to a direct allocation.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

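// The default PLAB allocator keeps one PLAB per destination, indexed by
// InCSetState; only the Young (survivor) and Old entries are backed by real
// buffers.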
G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old]   = &_tenured_alloc_buffer;
}

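// Fold the statistics of each PLAB and of the directly allocated words into
// the per-destination G1EvacStats, and reset the direct allocation counters.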
void G1DefaultPLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

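// Bump-pointer allocation of word_size words within the current archive
// region, working through it in min_region_size'd chunks. Space at the end of
// a chunk that is too small to use is covered with a filler object, keeping
// the region parseable.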
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_ptr_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = (HeapWord*)align_ptr_up(currtop + CollectedHeap::min_fill_size(),
                                         end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}