/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

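// Mutator alloc region management: init() prepares the wrapper before mutator
// allocations can be satisfied, and release() detaches any currently active
// region. Outside of this window the wrapper must hold no region, as the
// asserts below check.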
void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

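// Decide whether the old GC alloc region retained from the previous evacuation
// can be reused for this one. If it is still usable, it is removed from the old
// region set and installed as the current old GC alloc region; otherwise it is
// simply dropped. The detailed conditions are listed in the body below.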
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->_old_set.remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->_hr_printer.reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

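// Set up the survivor and old GC alloc regions at the start of an evacuation
// pause, trying to reinstall the old region retained from the previous pause.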
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  G1Allocator::init_gc_alloc_regions(evacuation_info);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

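// Release the GC alloc regions at the end of an evacuation pause. The old
// GC alloc region (if any) is kept in _retained_old_gc_alloc_region so that
// the next pause can try to continue allocating into it.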
void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't,
  // _retained_old_gc_alloc_region will become NULL. This is what we
  // want either way so no reason to check explicitly for either
  // condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }

  if (ResizePLAB) {
    _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz();
    _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz();
  }
}

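// Drop the reference to any retained old GC alloc region without reusing it.
// The active survivor and old GC alloc regions must already have been released.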
void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

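// A G1PLAB starts out in the retired state: it has no backing buffer until the
// first refill attaches one via set_buf().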
G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

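// Allocate word_size words for an object being evacuated into the given
// destination state, dispatching to the survivor or old GC alloc region.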
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

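// The *_is_full flags record that a previous attempt to get a new survivor or
// old GC alloc region failed during this evacuation, so later allocations can
// skip taking the FreeList_lock and fail fast.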
bool G1Allocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}

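// Allocate into the current survivor GC alloc region. First try the lock-free
// fast path; if that fails and survivor space is not yet known to be full,
// retry under the FreeList_lock, which may retire the region and install a new
// one. A successful allocation is recorded in the card table via
// dirty_young_block().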
HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, word_size);
  }
  return result;
}

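// Allocate into the current old GC alloc region, analogous to the survivor
// path above but with BOT (block offset table) updates enabled and no card
// dirtying.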
HeapWord* G1Allocator::old_attempt_allocation(size_t word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  _survivor_is_full = false;
  _old_is_full = false;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
}

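// Called when an object does not fit into the current PLAB. If the object is
// small relative to the desired PLAB size (below the ParallelGCBufferWastePct
// threshold), retire the current PLAB, try to refill it with a new buffer and
// allocate from there. Otherwise, or if the refill fails, fall back to
// allocating the object inline, i.e. directly from the GC alloc region.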
HeapWord* G1PLABAllocator::allocate_inline_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t gclab_word_size = _g1h->desired_plab_sz(dest);
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
    if (buf != NULL) {
      // The refill succeeded: install the new buffer and allocate from it.
      alloc_buf->set_word_size(gclab_word_size);
      alloc_buf->set_buf(buf);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "buffer was definitely big enough...");
      return obj;
    }
    // Otherwise the PLAB refill failed.
    *plab_refill_failed = true;
  }
  // Try inline allocation.
  return _allocator->par_allocate_during_gc(dest, word_sz, context);
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

void G1DefaultPLABAllocator::retire_alloc_buffers() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
  hr->set_archive();
  _g1h->_old_set.add(hr);
  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

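// Allocate word_size words for an archive object. Allocation proceeds in
// min_region_size'd chunks within the current archive region: when a request
// does not fit in the remainder of the current chunk, the remainder is filled
// with a dummy object and allocation continues in the next chunk, or in a
// newly allocated archive region once the current one is exhausted.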
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
                 p2i(_bottom), p2i(_allocation_region->bottom())));
  assert(_max <= _allocation_region->end(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
                 p2i(_max), p2i(_allocation_region->end())));
  assert(_bottom <= old_top && old_top <= _max,
         err_msg("inconsistent allocation state: expected "
                 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
                 p2i(_bottom), p2i(old_top), p2i(_max)));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

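// Finish the archive allocation: optionally pad the last region up to the
// requested end alignment, then report the allocated address ranges back to
// the caller as a list of MemRegions, merging ranges that are contiguous.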
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         err_msg("expected region %u at end of array, found %u",
                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}