/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"

size_t G1Allocator::desired_plab_size(InCSetState dest) {
  size_t gclab_word_size = evac_stats(dest)->desired_plab_sz();
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated along similar paths as oops, but should
  //   never be placed in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(G1CollectedHeap::heap()->humongous_object_threshold_in_words(), gclab_word_size);
}

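// Allocate between min_word_size and word_size words in the current survivor
// GC alloc region, first without and then (on failure) with the FreeList_lock
// held so that a new survivor region may be obtained. On success, word_size
// holds the size actually allocated and the block is dirtied as a young block.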
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t& word_size,
                                                   AllocationContext_t context) {
  assert(!G1CollectedHeap::heap()->is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           word_size,
                                                                           false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          word_size,
                                                                          false /* bot_updates */);
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, word_size);
  }
  return result;
}

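// Same as the survivor path, but for the old GC alloc region: BOT updates are
// enabled and no young-block dirtying is needed.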
HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t& word_size,
                                              AllocationContext_t context) {
  assert(!G1CollectedHeap::heap()->is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      word_size,
                                                                      true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     word_size,
                                                                     true /* bot_updates */);
  }
  return result;
}


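// Allocate during GC into the alloc region for the given destination state:
// the survivor region for young, the old region for old.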
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t& word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1Allocator::unsafe_max_tlab_alloc() {
  // Return the remaining space in the current alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
  size_t max_tlab = G1CollectedHeap::heap()->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

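// Set up the default allocator: per-destination PLAB statistics (survivor and
// old) and the corresponding GC alloc regions, with no retained old region yet.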
G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _retained_old_gc_alloc_region(NULL),
  _survivor_plab_stats(YoungPLABSize, PLABWeight),
  _old_plab_stats(OldPLABSize, PLABWeight),
  _survivor_gc_alloc_region(evac_stats(InCSetState::Young)),
  _old_gc_alloc_region(evac_stats(InCSetState::Old)) {
}

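// The mutator alloc region must be inactive before init() and is guaranteed
// to be inactive again after release().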
void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

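// Try to reinstall the old GC alloc region retained from the previous
// evacuation into 'old'; *retained_old is cleared either way.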
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

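// Prepare the survivor and old GC alloc regions for an evacuation pause and,
// if possible, reuse the old region retained from the previous pause.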
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

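// Release the GC alloc regions at the end of an evacuation pause, possibly
// retaining the current old region for the next pause, and recompute the
// desired PLAB sizes from this pause's allocation statistics.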
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no need to
  // check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }

  evac_stats(InCSetState::Young)->adjust_desired_plab_sz(no_of_gc_workers);
  evac_stats(InCSetState::Old)->adjust_desired_plab_sz(no_of_gc_workers);
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

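// Map an in-collection-set destination state to its PLAB statistics: survivor
// statistics for young, old statistics for old.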
G1EvacStats* G1DefaultAllocator::evac_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
      return &_survivor_plab_stats;
    case InCSetState::Old:
      return &_old_plab_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  ParGCAllocBuffer(gclab_word_size), _retired(true) { }

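// Allocate word_sz words for an object being copied to 'dest'. If the request
// fits in a PLAB and refilling would not waste more than
// ParallelGCBufferWastePct of the buffer, retire the current buffer, obtain a
// new PLAB and allocate from it; otherwise allocate the object directly and
// record it as an inline allocation. Returns NULL if no space could be obtained.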
HeapWord* PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                     size_t word_sz,
                                                     AllocationContext_t context) {
  size_t plab_word_size = _allocator->desired_plab_size(dest);
  size_t required_in_plab = word_sz + ParGCAllocBuffer::AlignmentReserve;

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      (required_in_plab * 100 < plab_word_size * ParallelGCBufferWastePct)) {
    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       context);
    if (buf == NULL) {
      return NULL; // Let caller handle allocation failure.
    }
    // Otherwise.
    alloc_buf->set_word_size(plab_word_size);
    alloc_buf->set_buf(buf);

    HeapWord* const obj = alloc_buf->allocate(word_sz);
    assert(obj != NULL, err_msg("PLAB should have been big enough, tried to allocate "
                                SIZE_FORMAT " with alignment reserve " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                                word_sz, ParGCAllocBuffer::AlignmentReserve, plab_word_size));
    return obj;
  } else {
    HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
    if (result != NULL) {
      _inline_allocated[dest.value()] += word_sz;
    }
    return result;
  }
}

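// Undo the most recent allocation of word_sz words at obj: if it came from the
// current PLAB, give the space back to the buffer; otherwise fill it with a
// dummy object and account for it as undo waste.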
void PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  if (alloc_buffer(dest, context)->contains(obj)) {
    assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
           "should contain whole object");
    alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
    _undo_waste[dest.value()] += word_sz;
  }
}

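// Set up one PLAB per valid destination (survivor and old) and register each
// in the per-state buffer table; states without a PLAB stay NULL.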
DefaultPLABAllocator::DefaultPLABAllocator(G1Allocator* allocator) :
  PLABAllocator(allocator),
  _surviving_alloc_buffer(allocator->desired_plab_size(InCSetState::Young)),
  _tenured_alloc_buffer(allocator->desired_plab_size(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old]  = &_tenured_alloc_buffer;
}

size_t DefaultPLABAllocator::lab_waste(InCSetState value) const {
  assert(value.is_valid_gen(), err_msg("Given CSetState " CSETSTATE_FORMAT " value must represent a generation", value.value()));
  return _alloc_buffers[value.value()]->wasted();
}

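// Flush and retire every active PLAB into its evacuation statistics, then add
// and reset the per-state inline-allocation and undo-waste counters.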
void DefaultPLABAllocator::flush_stats_and_retire() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _allocator->evac_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_inline_allocated(_inline_allocated[state]);
      stats->add_undo_waste(_undo_waste[state]);
      _inline_allocated[state] = 0;
      _undo_waste[state] = 0;
    }
  }
}