/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region");

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->_old_set.remove(retained_region);
    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->_hr_printer.reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

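// Note on retention (summary of the code below): the survivor GC alloc
// region is always released at the end of a collection, while the old GC
// alloc region may be saved in _retained_old_gc_alloc_region so that the
// next collection can continue allocating into it, subject to the checks
// in reuse_retained_old_region() above.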
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no
  // reason to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }

  if (ResizePLAB) {
    _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz(no_of_gc_workers);
    _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz(no_of_gc_workers);
  }
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

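// An evacuated object is either allocated out of a fresh PLAB or directly
// in the destination space. The PLAB path below is taken only when
// word_sz * 100 < desired_plab_sz(dest) * ParallelGCBufferWastePct, i.e.
// when the object is smaller than the waste-percentage fraction of a PLAB.
// Worked example (illustrative numbers only): with a desired PLAB size of
// 4096 words and ParallelGCBufferWastePct of 10, an object of 409 words or
// fewer retires the current buffer and is allocated in a new 4096-word
// PLAB, while a 410-word or larger object is allocated directly.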
HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                        size_t word_sz,
                                                        AllocationContext_t context) {
  size_t gclab_word_size = _g1h->desired_plab_sz(dest);
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
    if (buf == NULL) {
      return NULL; // Let caller handle allocation failure.
    }
    // Otherwise.
    alloc_buf->set_word_size(gclab_word_size);
    alloc_buf->set_buf(buf);

    HeapWord* const obj = alloc_buf->allocate(word_sz);
    assert(obj != NULL, "buffer was definitely big enough...");
    return obj;
  } else {
    return _g1h->par_allocate_during_gc(dest, word_sz, context);
  }
}

G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
  G1ParGCAllocator(g1h),
  _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old]  = &_tenured_alloc_buffer;
}

void G1DefaultParGCAllocator::retire_alloc_buffers() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
    }
  }
}

void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

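// G1RecordingAllocator parcels out "archive" objects from regions taken at
// the top of the reserved heap, recording the regions it touches so the
// caller can later retrieve the allocated address ranges. A rough usage
// sketch (illustrative only; the real call sites live elsewhere):
//
//   G1RecordingAllocator* rec = G1RecordingAllocator::create_allocator(g1h);
//   HeapWord* p = rec->record_mem_allocate(word_size);  // repeat per object
//   ...
//   rec->complete_recording(&ranges, end_alignment);    // ranges: the caller's
//                                                       // GrowableArray<MemRegion>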
G1RecordingAllocator* G1RecordingAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the recording allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1RecordingAllocator* result = new G1RecordingAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

HeapRegion* G1RecordingAllocator::alloc_new_region() {
  // Allocate the highest available region in the reserved heap,
  // and add it to our list of allocated regions.  It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_available_region();
  assert(hr->top() == hr->bottom(), "expected empty region");
  hr->set_archive();
  _g1h->_old_set.add(hr);
  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::mark_range_archive(_bottom, hr->end() - 1);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return hr;
}

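// record_mem_allocate() hands out word_size words from the current archive
// region, confining allocation to min_region_size'd chunks. If a request
// would cross the current chunk boundary (_max), the rest of the chunk is
// plugged with a filler object and the request is satisfied at the base of
// the next chunk (or of a newly allocated region if the current region is
// exhausted). For example, if top is 10 words below the chunk boundary and
// 64 words are requested, the 10 remaining words are filled and the caller
// receives the first 64 words of the next chunk.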
HeapWord* G1RecordingAllocator::record_mem_allocate(size_t word_size) {
  if (_allocation_region == NULL) {
    alloc_new_region();
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(), "inconsistent allocation state");
  assert(_max <= _allocation_region->end(), "inconsistent allocation state");
  assert(_bottom <= old_top && old_top <= _max, "inconsistent allocation state");

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a fill and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  // Note: remainder is only examined when new_top < _max, so the unsigned
  // wrap-around that occurs when new_top > _max is harmless.
  size_t remainder = (size_t)(_max - new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = _max - old_top;
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      alloc_new_region();
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

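// complete_recording() optionally pads the last allocation out to
// end_alignment, then walks _allocated_regions from the most recent entry
// backwards, appending one MemRegion per maximal run of contiguous
// [bottom, top) intervals. Because alloc_new_region() always takes the
// highest available region, later entries lie at lower addresses, so the
// backwards walk visits the regions in ascending address order.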
void G1RecordingAllocator::complete_recording(GrowableArray<MemRegion>* ranges,
                                              uint end_alignment) {
  assert((end_alignment >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment too large");
  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)round_to((intptr_t)currtop, end_alignment);
    size_t fill_size = newtop - currtop;
    if (fill_size != 0) {
      HeapWord* fill = record_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges.  Add the
  // MemRegions to the growable array provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region, "expect current region at end of array");
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index--);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, top - base_address));
      base_address = new_base;
    }
    top = new_top;
  }

  ranges->append(MemRegion(base_address, top - base_address));
  _allocated_regions.clear();
  _allocation_region = NULL;
}