1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/shared/collectorCounters.hpp"
  27 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/blockOffsetTable.inline.hpp"
  30 #include "memory/generation.inline.hpp"
  31 #include "memory/generationSpec.hpp"
  32 #include "memory/space.hpp"
  33 #include "memory/tenuredGeneration.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/java.hpp"
  36 
// Construct the tenured (old) generation.  The superclass constructor sets
// up the reserved/committed virtual space and the block-offset table (_bts);
// this constructor carves the space object out of the committed region,
// creates the performance counters, and (in parallel-capable builds)
// allocates one promotion buffer per GC worker thread.
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  OneContigSpaceCardGeneration(rs, initial_byte_size,
                               MinHeapDeltaBytes, level, remset, NULL)
{
  // The space spans the currently committed part of the virtual space.
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;        // shrink damping starts at 0%; compute_new_size() ramps it
  _capacity_at_prologue = 0; // recorded for real in gc_prologue()

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
#ifndef SERIALGC
  // With ParNew, promotions into this generation go through per-thread
  // allocation buffers (PLABs with a block-offset-table update): one per
  // GC worker thread.
  if (UseParNewGC) {
    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
                                      ParallelGCThreads, mtGC);
    if (_alloc_buffers == NULL)
      vm_exit_during_initialization("Could not allocate alloc_buffers");
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i] =
        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
      if (_alloc_buffers[i] == NULL)
        vm_exit_during_initialization("Could not allocate alloc_buffers");
    }
  } else {
    _alloc_buffers = NULL;
  }
#endif // SERIALGC
}
  82 
  83 
  84 const char* TenuredGeneration::name() const {
  85   return "tenured generation";
  86 }
  87 
  88 void TenuredGeneration::compute_new_size() {
  89   assert(_shrink_factor <= 100, "invalid shrink factor");
  90   size_t current_shrink_factor = _shrink_factor;
  91   _shrink_factor = 0;
  92 
  93   // We don't have floating point command-line arguments
  94   // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  95   const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  96   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  97 
  98   // Compute some numbers about the state of the heap.
  99   const size_t used_after_gc = used();
 100   const size_t capacity_after_gc = capacity();
 101 
 102   const double min_tmp = used_after_gc / maximum_used_percentage;
 103   size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
 104   // Don't shrink less than the initial generation size
 105   minimum_desired_capacity = MAX2(minimum_desired_capacity,
 106                                   spec()->init_size());
 107   assert(used_after_gc <= minimum_desired_capacity, "sanity check");
 108 
 109   if (PrintGC && Verbose) {
 110     const size_t free_after_gc = free();
 111     const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
 112     gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
 113     gclog_or_tty->print_cr("  "
 114                   "  minimum_free_percentage: %6.2f"
 115                   "  maximum_used_percentage: %6.2f",
 116                   minimum_free_percentage,
 117                   maximum_used_percentage);
 118     gclog_or_tty->print_cr("  "
 119                   "   free_after_gc   : %6.1fK"
 120                   "   used_after_gc   : %6.1fK"
 121                   "   capacity_after_gc   : %6.1fK",
 122                   free_after_gc / (double) K,
 123                   used_after_gc / (double) K,
 124                   capacity_after_gc / (double) K);
 125     gclog_or_tty->print_cr("  "
 126                   "   free_percentage: %6.2f",
 127                   free_percentage);
 128   }
 129 
 130   if (capacity_after_gc < minimum_desired_capacity) {
 131     // If we have less free space than we want then expand
 132     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
 133     // Don't expand unless it's significant
 134     if (expand_bytes >= _min_heap_delta_bytes) {
 135       expand(expand_bytes, 0); // safe if expansion fails
 136     }
 137     if (PrintGC && Verbose) {
 138       gclog_or_tty->print_cr("    expanding:"
 139                     "  minimum_desired_capacity: %6.1fK"
 140                     "  expand_bytes: %6.1fK"
 141                     "  _min_heap_delta_bytes: %6.1fK",
 142                     minimum_desired_capacity / (double) K,
 143                     expand_bytes / (double) K,
 144                     _min_heap_delta_bytes / (double) K);
 145     }
 146     return;
 147   }
 148 
 149   // No expansion, now see if we want to shrink
 150   size_t shrink_bytes = 0;
 151   // We would never want to shrink more than this
 152   size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
 153 
 154   if (MaxHeapFreeRatio < 100) {
 155     const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
 156     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
 157     const double max_tmp = used_after_gc / minimum_used_percentage;
 158     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
 159     maximum_desired_capacity = MAX2(maximum_desired_capacity,
 160                                     spec()->init_size());
 161     if (PrintGC && Verbose) {
 162       gclog_or_tty->print_cr("  "
 163                              "  maximum_free_percentage: %6.2f"
 164                              "  minimum_used_percentage: %6.2f",
 165                              maximum_free_percentage,
 166                              minimum_used_percentage);
 167       gclog_or_tty->print_cr("  "
 168                              "  _capacity_at_prologue: %6.1fK"
 169                              "  minimum_desired_capacity: %6.1fK"
 170                              "  maximum_desired_capacity: %6.1fK",
 171                              _capacity_at_prologue / (double) K,
 172                              minimum_desired_capacity / (double) K,
 173                              maximum_desired_capacity / (double) K);
 174     }
 175     assert(minimum_desired_capacity <= maximum_desired_capacity,
 176            "sanity check");
 177 
 178     if (capacity_after_gc > maximum_desired_capacity) {
 179       // Capacity too large, compute shrinking size
 180       shrink_bytes = capacity_after_gc - maximum_desired_capacity;
 181       // We don't want shrink all the way back to initSize if people call
 182       // System.gc(), because some programs do that between "phases" and then
 183       // we'd just have to grow the heap up again for the next phase.  So we
 184       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 185       // on the third call, and 100% by the fourth call.  But if we recompute
 186       // size without shrinking, it goes back to 0%.
 187       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
 188       assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
 189       if (current_shrink_factor == 0) {
 190         _shrink_factor = 10;
 191       } else {
 192         _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
 193       }
 194       if (PrintGC && Verbose) {
 195         gclog_or_tty->print_cr("  "
 196                       "  shrinking:"
 197                       "  initSize: %.1fK"
 198                       "  maximum_desired_capacity: %.1fK",
 199                       spec()->init_size() / (double) K,
 200                       maximum_desired_capacity / (double) K);
 201         gclog_or_tty->print_cr("  "
 202                       "  shrink_bytes: %.1fK"
 203                       "  current_shrink_factor: %d"
 204                       "  new shrink factor: %d"
 205                       "  _min_heap_delta_bytes: %.1fK",
 206                       shrink_bytes / (double) K,
 207                       current_shrink_factor,
 208                       _shrink_factor,
 209                       _min_heap_delta_bytes / (double) K);
 210       }
 211     }
 212   }
 213 
 214   if (capacity_after_gc > _capacity_at_prologue) {
 215     // We might have expanded for promotions, in which case we might want to
 216     // take back that expansion if there's room after GC.  That keeps us from
 217     // stretching the heap with promotions when there's plenty of room.
 218     size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
 219     expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
 220     // We have two shrinking computations, take the largest
 221     shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
 222     assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
 223     if (PrintGC && Verbose) {
 224       gclog_or_tty->print_cr("  "
 225                              "  aggressive shrinking:"
 226                              "  _capacity_at_prologue: %.1fK"
 227                              "  capacity_after_gc: %.1fK"
 228                              "  expansion_for_promotion: %.1fK"
 229                              "  shrink_bytes: %.1fK",
 230                              capacity_after_gc / (double) K,
 231                              _capacity_at_prologue / (double) K,
 232                              expansion_for_promotion / (double) K,
 233                              shrink_bytes / (double) K);
 234     }
 235   }
 236   // Don't shrink unless it's significant
 237   if (shrink_bytes >= _min_heap_delta_bytes) {
 238     shrink(shrink_bytes);
 239   }
 240   assert(used() == used_after_gc && used_after_gc <= capacity(),
 241          "sanity check");
 242 }
 243 
 244 void TenuredGeneration::gc_prologue(bool full) {
 245   _capacity_at_prologue = capacity();
 246   _used_at_prologue = used();
 247   if (VerifyBeforeGC) {
 248     verify_alloc_buffers_clean();
 249   }
 250 }
 251 
// Called at the end of a GC.  Optionally verifies that retained parallel
// allocation buffers do not intersect dirty cards, then delegates to the
// superclass epilogue.
void TenuredGeneration::gc_epilogue(bool full) {
  if (VerifyAfterGC) {
    verify_alloc_buffers_clean();
  }
  OneContigSpaceCardGeneration::gc_epilogue(full);
}
 258 
 259 
// Decide whether this generation should be collected now.  Returns true if
// any of the triggering conditions holds; each condition is tested
// separately (rather than one combined ||) so that the verbose logging can
// report exactly which one fired.
bool TenuredGeneration::should_collect(bool  full,
                                       size_t size,
                                       bool   is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  bool result = false;
  // A full collection was explicitly requested.
  if (!result && full) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " full");
    }
  }
  // An allocation of 'size' words belongs in this generation.
  if (!result && should_allocate(size, is_tlab)) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " should_allocate(" SIZE_FORMAT ")",
                    size);
    }
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (!result && free() < 10000) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " free(): " SIZE_FORMAT,
                    free());
    }
  }
  // If we had to expand to accommodate promotions from younger generations
  if (!result && _capacity_at_prologue < capacity()) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    "_capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
                    _capacity_at_prologue, capacity());
    }
  }
  return result;
}
 304 
// Perform a collection of this generation.  Outstanding parallel promotion
// buffers are retired first so the space is fully parseable before the
// superclass collection machinery runs.
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  retire_alloc_buffers_before_full_gc();
  OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
                                        size, is_tlab);
}
 313 
// After a collection of the generation one level below this one, sample the
// number of bytes promoted into this generation (growth of used() since
// gc_prologue()) into the promotion-size moving average.
void TenuredGeneration::update_gc_stats(int current_level,
                                        bool full) {
  // If the next lower level(s) has been collected, gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Calculate size of data promoted from the younger generations
    // before doing the collection.
    size_t used_before_gc = used();

    // If the younger gen collections were skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}
 333 
 334 void TenuredGeneration::update_counters() {
 335   if (UsePerfData) {
 336     _space_counters->update_all();
 337     _gen_counters->update_all();
 338   }
 339 }
 340 
 341 
 342 #ifndef SERIALGC
 343 oop TenuredGeneration::par_promote(int thread_num,
 344                                    oop old, markOop m, size_t word_sz) {
 345 
 346   ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
 347   HeapWord* obj_ptr = buf->allocate(word_sz);
 348   bool is_lab = true;
 349   if (obj_ptr == NULL) {
 350 #ifndef PRODUCT
 351     if (Universe::heap()->promotion_should_fail()) {
 352       return NULL;
 353     }
 354 #endif  // #ifndef PRODUCT
 355 
 356     // Slow path:
 357     if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
 358       // Is small enough; abandon this buffer and start a new one.
 359       size_t buf_size = buf->word_sz();
 360       HeapWord* buf_space =
 361         TenuredGeneration::par_allocate(buf_size, false);
 362       if (buf_space == NULL) {
 363         buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
 364       }
 365       if (buf_space != NULL) {
 366         buf->retire(false, false);
 367         buf->set_buf(buf_space);
 368         obj_ptr = buf->allocate(word_sz);
 369         assert(obj_ptr != NULL, "Buffer was definitely big enough...");
 370       }
 371     };
 372     // Otherwise, buffer allocation failed; try allocating object
 373     // individually.
 374     if (obj_ptr == NULL) {
 375       obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
 376       if (obj_ptr == NULL) {
 377         obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
 378       }
 379     }
 380     if (obj_ptr == NULL) return NULL;
 381   }
 382   assert(obj_ptr != NULL, "program logic");
 383   Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
 384   oop obj = oop(obj_ptr);
 385   // Restore the mark word copied above.
 386   obj->set_mark(m);
 387   return obj;
 388 }
 389 
 390 void TenuredGeneration::par_promote_alloc_undo(int thread_num,
 391                                                HeapWord* obj,
 392                                                size_t word_sz) {
 393   ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
 394   if (buf->contains(obj)) {
 395     guarantee(buf->contains(obj + word_sz - 1),
 396               "should contain whole object");
 397     buf->undo_allocation(obj, word_sz);
 398   } else {
 399     CollectedHeap::fill_with_object(obj, word_sz);
 400   }
 401 }
 402 
// Called when GC worker thread_num is done promoting: retires its buffer,
// optionally retaining the unused tail per ParallelGCRetainPLAB.
void TenuredGeneration::par_promote_alloc_done(int thread_num) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  buf->retire(true, ParallelGCRetainPLAB);
}
 407 
 408 void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
 409   if (UseParNewGC) {
 410     for (uint i = 0; i < ParallelGCThreads; i++) {
 411       _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
 412     }
 413   }
 414 }
 415 
 416 // Verify that any retained parallel allocation buffers do not
 417 // intersect with dirty cards.
 418 void TenuredGeneration::verify_alloc_buffers_clean() {
 419   if (UseParNewGC) {
 420     for (uint i = 0; i < ParallelGCThreads; i++) {
 421       _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
 422     }
 423   }
 424 }
 425 
 426 #else  // SERIALGC
// Serial-only build: there are no parallel promotion buffers, so these
// are no-ops.
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
void TenuredGeneration::verify_alloc_buffers_clean() {}
 429 #endif // SERIALGC
 430 
 431 bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 432   size_t available = max_contiguous_available();
 433   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 434   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 435   if (PrintGC && Verbose) {
 436     gclog_or_tty->print_cr(
 437       "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 438       "max_promo("SIZE_FORMAT")",
 439       res? "":" not", available, res? ">=":"<",
 440       av_promo, max_promotion_in_bytes);
 441   }
 442   return res;
 443 }