/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"

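// Start a new mutator allocation region. The previous one must already have
// been released, as checked by the assert below.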
void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

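// Retire the current mutator allocation region and clear the reference to it.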
void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

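// Try to install the old region retained from the previous evacuation pause
// as the current old GC allocation region, so that the space left in it can
// be reused. The conditions below describe when the retained region is
// discarded instead.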
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->_old_set.remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->_hr_printer.reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

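// Set up the survivor and old GC allocation regions at the start of an
// evacuation pause, reusing the retained old region if one is available.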
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

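// Release the GC allocation regions at the end of an evacuation pause,
// retaining the old region (if any) for the next pause and, when ResizePLAB
// is enabled, adjusting the desired PLAB sizes based on this pause's usage.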
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't,
  // _retained_old_gc_alloc_region will become NULL. This is what we
  // want either way, so there is no reason to check explicitly for
  // either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }

  if (ResizePLAB) {
    _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz(no_of_gc_workers);
    _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz(no_of_gc_workers);
  }
}

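// Forget the retained old GC allocation region without reusing it. The
// active GC allocation regions must already have been released.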
void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

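// A freshly constructed G1PLAB starts out retired: it has no backing buffer
// yet; one is installed via set_buf() before it can be allocated from.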
G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

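// Allocate word_sz words for a copy into the given destination, either from
// a newly allocated PLAB or directly in the destination space, depending on
// how the request compares to the PLAB waste limit.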
HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                        size_t word_sz,
                                                        AllocationContext_t context) {
  size_t gclab_word_size = _g1h->desired_plab_sz(dest);
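  // Requests smaller than the PLAB waste limit (ParallelGCBufferWastePct
  // percent of the desired PLAB size) justify retiring the current buffer
  // and starting a fresh PLAB; larger requests are allocated directly in
  // the destination space (the else branch).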
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
    if (buf == NULL) {
      return NULL; // Let caller handle allocation failure.
    }
    // Otherwise.
    alloc_buf->set_word_size(gclab_word_size);
    alloc_buf->set_buf(buf);

    HeapWord* const obj = alloc_buf->allocate(word_sz);
    assert(obj != NULL, "buffer was definitely big enough...");
    return obj;
  } else {
    return _g1h->par_allocate_during_gc(dest, word_sz, context);
  }
}

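// The default per-worker allocator keeps one PLAB for survivor copies and one
// for tenured copies, indexed by InCSetState; the remaining slots stay NULL.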
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
  G1ParGCAllocator(g1h),
  _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old]   = &_tenured_alloc_buffer;
}

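// Retire this worker's PLABs and fold their allocation statistics into the
// per-destination statistics kept by the heap.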
void G1DefaultParGCAllocator::retire_alloc_buffers() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
    }
  }
}

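// Sum the wasted and undo-wasted space across this worker's PLABs.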
void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}