/*
 * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/align.hpp"
G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;

void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

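  // The dummy region stands in for a real region whenever there is no
  // active alloc region: because it is always full, any lock-free
  // allocation attempt against it fails and falls through to the slow
  // path, so the fast path never needs a NULL check on _alloc_region.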
  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  assert(dummy_region->allocate_no_bot_updates(1) == NULL, "should fail");
  assert(dummy_region->allocate(1) == NULL, "should fail");
  DEBUG_ONLY(size_t assert_tmp);
  assert(dummy_region->par_allocate_no_bot_updates(1, 1, &assert_tmp) == NULL, "should fail");
  assert(dummy_region->par_allocate(1, 1, &assert_tmp) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}

size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
  assert(alloc_region != NULL && alloc_region != _dummy_region,
         "pre-condition");
  size_t result = 0;

  // Other threads might still be trying to allocate using a CAS out
  // of the region we are trying to retire, as they can do so without
  // holding the lock. So, we first have to make sure that no one else
  // can allocate out of it by doing a maximal allocation. Even if our
  // CAS attempt fails a few times, we'll succeed sooner or later
  // given that a failed CAS attempt means that the region is getting
  // closer to being full.
  size_t free_word_size = alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then no one can
  // allocate in this region anyway (all allocation requests will be
  // of a size larger than this) so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy = par_allocate(alloc_region, free_word_size);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      alloc_region->set_pre_dummy_top(dummy);
      result += free_word_size * HeapWordSize;
      break;
    }

    free_word_size = alloc_region->free() / HeapWordSize;
    // It's also possible that someone else beats us to the
    // allocation and they fill up the region. In that case, we can
    // just get out of the loop.
  }
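  // Whatever free space remains is too small to hold a filler object
  // (see the post-condition below), so no one can allocate into it;
  // count it as waste as well.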
  result += alloc_region->free();

  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "post-condition");
  return result;
}

size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) {
  // We never have to check whether the active region is empty (and
  // potentially free it if it is), since the active region is
  // guaranteed never to be empty.
  size_t waste = 0;
  assert_alloc_region(!alloc_region->is_empty(),
      "the alloc region should never be empty");

  if (fill_up) {
    waste = fill_up_remaining_space(alloc_region);
  }

  assert_alloc_region(alloc_region->used() >= _used_bytes_before, "invariant");
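  // _used_bytes_before was captured when this region became the active
  // alloc region, so the difference below is the number of bytes allocated
  // through this G1AllocRegion.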
  size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
  retire_region(alloc_region, allocated_bytes);
  _used_bytes_before = 0;

  return waste;
}

size_t G1AllocRegion::retire(bool fill_up) {
  assert_alloc_region(_alloc_region != NULL, "not initialized properly");

  size_t waste = 0;

  trace("retiring");
  HeapRegion* alloc_region = _alloc_region;
  if (alloc_region != _dummy_region) {
    waste = retire_internal(alloc_region, fill_up);
    reset_alloc_region();
  }
  trace("retired");

  return waste;
}

HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
                                                       bool force) {
  assert_alloc_region(_alloc_region == _dummy_region, "pre-condition");
  assert_alloc_region(_used_bytes_before == 0, "pre-condition");

  trace("attempting region allocation");
  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
  if (new_alloc_region != NULL) {
    new_alloc_region->reset_pre_dummy_top();
    // Need to do this before the allocation
    _used_bytes_before = new_alloc_region->used();
    HeapWord* result = allocate(new_alloc_region, word_size);
    assert_alloc_region(result != NULL, "the allocation should have succeeded");

    OrderAccess::storestore();
    // Note that we first perform the allocation and then we store the
    // region in _alloc_region. This is the reason why an active region
    // can never be empty.
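    // The storestore barrier ensures that the contents of the new object
    // are visible before other threads can observe the region through
    // _alloc_region, which they may read without holding a lock.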
    update_alloc_region(new_alloc_region);
    trace("region allocation successful");
    return result;
  } else {
    trace("region allocation failed");
    return NULL;
  }
}

void G1AllocRegion::init() {
  trace("initializing");
  assert_alloc_region(_alloc_region == NULL && _used_bytes_before == 0, "pre-condition");
  assert_alloc_region(_dummy_region != NULL, "should have been set");
  _alloc_region = _dummy_region;
  _count = 0;
  trace("initialized");
}

void G1AllocRegion::set(HeapRegion* alloc_region) {
  trace("setting");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
  assert_alloc_region(_alloc_region == _dummy_region &&
                         _used_bytes_before == 0 && _count == 0,
                         "pre-condition");

  _used_bytes_before = alloc_region->used();
  _alloc_region = alloc_region;
  _count += 1;
  trace("set");
}

void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
  trace("update");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");

  _alloc_region = alloc_region;
  _count += 1;
  trace("updated");
}

HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  HeapRegion* alloc_region = _alloc_region;
  retire(false /* fill_up */);
  assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()");
  _alloc_region = NULL;
  trace("released");
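  // Make sure the dummy region does not escape to callers.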
  return (alloc_region == _dummy_region) ? NULL : alloc_region;
}

#ifndef PRODUCT
void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_word_size, size_t actual_word_size, HeapWord* result) {
  // Calls to trace() that pass a size (and possibly a result) are
  // considered detailed tracing and are only printed when trace-level
  // logging is enabled; all other calls are printed at debug level.

  Log(gc, alloc, region) log;

  if (!log.is_debug()) {
    return;
  }

  bool detailed_info = log.is_trace();

  if ((actual_word_size == 0 && result == NULL) || detailed_info) {
    ResourceMark rm;
    LogStream ls_trace(log.trace());
    LogStream ls_debug(log.debug());
    outputStream* out = detailed_info ? &ls_trace : &ls_debug;

    out->print("%s: %u ", _name, _count);

    if (_alloc_region == NULL) {
      out->print("NULL");
    } else if (_alloc_region == _dummy_region) {
      out->print("DUMMY");
    } else {
      out->print(HR_FORMAT, HR_FORMAT_PARAMS(_alloc_region));
    }

    out->print(" : %s", str);

    if (detailed_info) {
      if (result != NULL) {
        out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT,
                     min_word_size, desired_word_size, actual_word_size, p2i(result));
      } else if (min_word_size != 0) {
        out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size);
      }
    }
    out->cr();
  }
}
#endif // PRODUCT

G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates)
  : _name(name), _bot_updates(bot_updates),
    _alloc_region(NULL), _count(0),
    _used_bytes_before(0) { }

HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                    bool force) {
  return _g1h->new_mutator_alloc_region(word_size, force);
}

void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                       size_t allocated_bytes) {
  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}

void MutatorAllocRegion::init() {
  assert(_retained_alloc_region == NULL, "Pre-condition");
  G1AllocRegion::init();
  _wasted_bytes = 0;
}

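// Decide whether a region being retired should become the retained alloc
// region: it must have room for at least a minimum-sized TLAB and more
// free space than the currently retained region, if any.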
bool MutatorAllocRegion::should_retain(HeapRegion* region) {
  size_t free_bytes = region->free();
  if (free_bytes < MinTLABSize) {
    return false;
  }

  if (_retained_alloc_region != NULL &&
      free_bytes < _retained_alloc_region->free()) {
    return false;
  }

  return true;
}

size_t MutatorAllocRegion::retire(bool fill_up) {
  size_t waste = 0;
  trace("retiring");
  HeapRegion* current_region = get();
  if (current_region != NULL) {
    // Retain the current region if it fits a TLAB and has more free
    // space than the currently retained region.
    if (should_retain(current_region)) {
      trace("mutator retained");
      if (_retained_alloc_region != NULL) {
        waste = retire_internal(_retained_alloc_region, true);
      }
      _retained_alloc_region = current_region;
    } else {
      waste = retire_internal(current_region, fill_up);
    }
    reset_alloc_region();
  }

  _wasted_bytes += waste;
  trace("retired");
  return waste;
}

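// Returns the number of bytes currently used in the active mutator alloc
// region and, if present, in the retained alloc region.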
size_t MutatorAllocRegion::used_in_alloc_regions() {
  size_t used = 0;
  HeapRegion* hr = get();
  if (hr != NULL) {
    used += hr->used();
  }

  hr = _retained_alloc_region;
  if (hr != NULL) {
    used += hr->used();
  }
  return used;
}

HeapRegion* MutatorAllocRegion::release() {
  HeapRegion* ret = G1AllocRegion::release();

  // The retained alloc region must be retired, and this must be done
  // after the above call that releases the mutator alloc region, since
  // that call might update the _retained_alloc_region member.
  if (_retained_alloc_region != NULL) {
    _wasted_bytes += retire_internal(_retained_alloc_region, false);
    _retained_alloc_region = NULL;
  }
  log_debug(gc, alloc, region)("Mutator Allocation stats, regions: %u, wasted size: " SIZE_FORMAT "%s (%4.1f%%)",
                               count(),
                               byte_size_in_proper_unit(_wasted_bytes),
                               proper_unit_for_byte_size(_wasted_bytes),
                               percent_of(_wasted_bytes, count() * HeapRegion::GrainBytes));
  return ret;
}

HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
                                                 bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, _purpose);
}

void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, _purpose);
}

size_t G1GCAllocRegion::retire(bool fill_up) {
  HeapRegion* retired = get();
  size_t end_waste = G1AllocRegion::retire(fill_up);
  // Do not count retirement of the dummy allocation region.
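  // Note: retire() returns the waste in bytes, while the evacuation
  // statistics are tracked in words, hence the conversion below.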
  if (retired != NULL) {
    _stats->add_region_end_waste(end_waste / HeapWordSize);
  }
  return end_waste;
}

HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
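    // Filling up to a card boundary means the last card allocated into is
    // completely covered by objects; this avoids trouble with concurrent
    // BOT updates for that card if this region is retained and allocated
    // into again during a later collection.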
    HeapWord* top = cur->top();
    HeapWord* aligned_top = align_up(top, BOTConstants::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);

    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up, possibly into the next card,
      // taking the end of the region and the minimum object size into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}