/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"

G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;

void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  assert(dummy_region->allocate_no_bot_updates(1) == NULL, "should fail");
  assert(dummy_region->allocate(1) == NULL, "should fail");
  DEBUG_ONLY(size_t assert_tmp);
  assert(dummy_region->par_allocate_no_bot_updates(1, 1, &assert_tmp) == NULL, "should fail");
  assert(dummy_region->par_allocate(1, 1, &assert_tmp) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}

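// Fill the unused tail of the given region with a dummy object so that no
// thread can allocate into it any more, and return the number of bytes of
// that tail that end up wasted.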
size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
  assert(alloc_region != NULL && alloc_region != _dummy_region,
         "pre-condition");
  size_t result = 0;

  // Other threads might still be trying to allocate using a CAS out
  // of the region we are trying to retire, as they can do so without
  // holding the lock. So, we first have to make sure that no one else
  // can allocate out of it by doing a maximal allocation. Even if our
  // CAS attempt fails a few times, we'll succeed sooner or later
  // given that failed CAS attempts mean that the region is getting
  // close to being full.
  size_t free_word_size = alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then no one can
  // allocate in this region anyway (all allocation requests will be
  // of a size larger than this) so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy = par_allocate(alloc_region, free_word_size);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      alloc_region->set_pre_dummy_top(dummy);
      result += free_word_size * HeapWordSize;
      break;
    }

    free_word_size = alloc_region->free() / HeapWordSize;
    // It's also possible that someone else beats us to the
    // allocation and they fill up the region. In that case, we can
    // just get out of the loop.
  }
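  // Whatever free space remains now is smaller than min_fill_size() and can
  // never be allocated into, so count it as waste as well.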
  result += alloc_region->free();

  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "post-condition");
  return result;
}

size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) {
  // We never have to check whether the active region is empty or not,
  // and potentially free it if it is, given that it's guaranteed that
  // it will never be empty.
  size_t waste = 0;
  assert_alloc_region(!alloc_region->is_empty(),
      "the alloc region should never be empty");

  if (fill_up) {
    waste = fill_up_remaining_space(alloc_region);
  }

  assert_alloc_region(alloc_region->used() >= _used_bytes_before, "invariant");
  size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
  retire_region(alloc_region, allocated_bytes);
  _used_bytes_before = 0;

  return waste;
}

size_t G1AllocRegion::retire(bool fill_up) {
  assert_alloc_region(_alloc_region != NULL, "not initialized properly");

  size_t waste = 0;

  trace("retiring");
  HeapRegion* alloc_region = _alloc_region;
  if (alloc_region != _dummy_region) {
    waste = retire_internal(alloc_region, fill_up);
    reset_alloc_region();
  }
  trace("retired");

  return waste;
}

HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
                                                       bool force) {
  assert_alloc_region(_alloc_region == _dummy_region, "pre-condition");
  assert_alloc_region(_used_bytes_before == 0, "pre-condition");

  trace("attempting region allocation");
  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
  if (new_alloc_region != NULL) {
    new_alloc_region->reset_pre_dummy_top();
    // Need to do this before the allocation
    _used_bytes_before = new_alloc_region->used();
    HeapWord* result = allocate(new_alloc_region, word_size);
    assert_alloc_region(result != NULL, "the allocation should have succeeded");

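    // The storestore barrier ensures that the stores initializing the new
    // region (including the allocation above) become visible before the
    // store that publishes it in _alloc_region.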
    OrderAccess::storestore();
    // Note that we first perform the allocation and then we store the
    // region in _alloc_region. This is the reason why an active region
    // can never be empty.
    update_alloc_region(new_alloc_region);
    trace("region allocation successful");
    return result;
  } else {
    trace("region allocation failed");
    return NULL;
  }
  ShouldNotReachHere();
}

void G1AllocRegion::init() {
  trace("initializing");
  assert_alloc_region(_alloc_region == NULL && _used_bytes_before == 0, "pre-condition");
  assert_alloc_region(_dummy_region != NULL, "should have been set");
  _alloc_region = _dummy_region;
  _count = 0;
  trace("initialized");
}

void G1AllocRegion::set(HeapRegion* alloc_region) {
  trace("setting");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
  assert_alloc_region(_alloc_region == _dummy_region &&
                         _used_bytes_before == 0 && _count == 0,
                         "pre-condition");

  _used_bytes_before = alloc_region->used();
  _alloc_region = alloc_region;
  _count += 1;
  trace("set");
}

void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
  trace("update");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");

  _alloc_region = alloc_region;
  _count += 1;
  trace("updated");
}

HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  HeapRegion* alloc_region = _alloc_region;
  retire(false /* fill_up */);
  assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()");
  _alloc_region = NULL;
  trace("released");
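  // Never let the dummy region escape from this class; return NULL instead.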
  return (alloc_region == _dummy_region) ? NULL : alloc_region;
}

#ifndef PRODUCT
void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_word_size, size_t actual_word_size, HeapWord* result) {
  // All the calls to trace that set either just the size or the size
  // and the result are considered part of detailed tracing and are
  // skipped during other tracing.

  Log(gc, alloc, region) log;

  if (!log.is_debug()) {
    return;
  }

  bool detailed_info = log.is_trace();

  if ((actual_word_size == 0 && result == NULL) || detailed_info) {
    ResourceMark rm;
    LogStream ls_trace(log.trace());
    LogStream ls_debug(log.debug());
    outputStream* out = detailed_info ? &ls_trace : &ls_debug;

    out->print("%s: %u ", _name, _count);

    if (_alloc_region == NULL) {
      out->print("NULL");
    } else if (_alloc_region == _dummy_region) {
      out->print("DUMMY");
    } else {
      out->print(HR_FORMAT, HR_FORMAT_PARAMS(_alloc_region));
    }

    out->print(" : %s", str);

    if (detailed_info) {
      if (result != NULL) {
        out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT,
                     min_word_size, desired_word_size, actual_word_size, p2i(result));
      } else if (min_word_size != 0) {
        out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size);
      }
    }
    out->cr();
  }
}
#endif // PRODUCT

G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates,
                             uint node_index)
  : _alloc_region(NULL),
    _count(0),
    _used_bytes_before(0),
    _bot_updates(bot_updates),
    _name(name),
    _node_index(node_index)
 { }

HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                    bool force) {
  return _g1h->new_mutator_alloc_region(word_size, force, _node_index);
}

void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                       size_t allocated_bytes) {
  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}

void MutatorAllocRegion::init() {
  assert(_retained_alloc_region == NULL, "pre-condition");
  G1AllocRegion::init();
  _wasted_bytes = 0;
}

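// Decide whether the region being retired should be kept as the retained
// region for future TLAB refills: it must still fit a TLAB and have more
// free space than the currently retained region, if any.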
bool MutatorAllocRegion::should_retain(HeapRegion* region) {
  size_t free_bytes = region->free();
  if (free_bytes < MinTLABSize) {
    return false;
  }

  if (_retained_alloc_region != NULL &&
      free_bytes < _retained_alloc_region->free()) {
    return false;
  }

  return true;
}

size_t MutatorAllocRegion::retire(bool fill_up) {
  size_t waste = 0;
  trace("retiring");
  HeapRegion* current_region = get();
  if (current_region != NULL) {
    // Retain the current region if it fits a TLAB and has more
    // free space than the currently retained region.
    if (should_retain(current_region)) {
      trace("mutator retained");
      if (_retained_alloc_region != NULL) {
        waste = retire_internal(_retained_alloc_region, true);
      }
      _retained_alloc_region = current_region;
    } else {
      waste = retire_internal(current_region, fill_up);
    }
    reset_alloc_region();
  }

  _wasted_bytes += waste;
  trace("retired");
  return waste;
}

size_t MutatorAllocRegion::used_in_alloc_regions() {
  size_t used = 0;
  HeapRegion* hr = get();
  if (hr != NULL) {
    used += hr->used();
  }

  hr = _retained_alloc_region;
  if (hr != NULL) {
    used += hr->used();
  }
  return used;
}

HeapRegion* MutatorAllocRegion::release() {
  HeapRegion* ret = G1AllocRegion::release();

  // The retained alloc region must be retired and this must be
  // done after the above call to release the mutator alloc region,
  // since it might update the _retained_alloc_region member.
  if (_retained_alloc_region != NULL) {
    _wasted_bytes += retire_internal(_retained_alloc_region, false);
    _retained_alloc_region = NULL;
  }
  log_debug(gc, alloc, region)("Mutator Allocation stats, regions: %u, wasted size: " SIZE_FORMAT "%s (%4.1f%%)",
                               count(),
                               byte_size_in_proper_unit(_wasted_bytes),
                               proper_unit_for_byte_size(_wasted_bytes),
                               percent_of(_wasted_bytes, count() * HeapRegion::GrainBytes));
  return ret;
}

HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
                                                 bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, _purpose);
}

void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, _purpose);
}

size_t G1GCAllocRegion::retire(bool fill_up) {
  HeapRegion* retired = get();
  size_t end_waste = G1AllocRegion::retire(fill_up);
  // Do not count retirement of the dummy allocation region.
  if (retired != NULL) {
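    // retire() reports waste in bytes, while the evacuation stats are
    // tracked in words.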
    _stats->add_region_end_waste(end_waste / HeapWordSize);
  }
  return end_waste;
}

HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
    HeapWord* top = cur->top();
    HeapWord* aligned_top = align_up(top, BOTConstants::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
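    // For example, with the default 512-byte BOT cards and 8-byte heap words,
    // a top that sits 24 bytes past a card boundary leaves
    // (512 - 24) / 8 = 61 words to the next boundary.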

    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up to the boundary, possibly
      // crossing into the next card, taking the end of the region and the
      // minimum object size into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}