/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_tenuredGeneration.cpp.incl"

TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  OneContigSpaceCardGeneration(rs, initial_byte_size,
                               MinHeapDeltaBytes, level, remset, NULL)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
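  // With ParNew, promotion into this generation is performed by multiple GC
  // worker threads, so each thread gets its own promotion buffer (PLAB)
  // backed by the block-offset table; see par_promote() below.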
#ifndef SERIALGC
  if (UseParNewGC && ParallelGCThreads > 0) {
    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
                                      ParallelGCThreads);
    if (_alloc_buffers == NULL)
      vm_exit_during_initialization("Could not allocate alloc_buffers");
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i] =
        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
      if (_alloc_buffers[i] == NULL)
        vm_exit_during_initialization("Could not allocate alloc_buffers");
    }
  } else {
    _alloc_buffers = NULL;
  }
#endif // SERIALGC
}


const char* TenuredGeneration::name() const {
  return "tenured generation";
}

void TenuredGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

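  // Illustrative sizing example (hypothetical values): with MinHeapFreeRatio
  // of 40, maximum_used_percentage is 0.60, so 120M of live data after GC
  // asks for at least 120M / 0.60 = 200M of capacity, keeping 40% of the
  // generation free before any shrinking is considered.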
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   free_after_gc   : %6.1fK"
                  "   used_after_gc   : %6.1fK"
                  "   capacity_after_gc   : %6.1fK",
                  free_after_gc / (double) K,
                  used_after_gc / (double) K,
                  capacity_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                  "   free_percentage: %6.2f",
                  free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fK"
                    "  expand_bytes: %6.1fK"
                    "  _min_heap_delta_bytes: %6.1fK",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    _min_heap_delta_bytes / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
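    // As above, but for the upper bound.  Illustrative example (hypothetical
    // values): with MaxHeapFreeRatio of 70, minimum_used_percentage is 0.30,
    // so 120M of live data caps the desired capacity at 120M / 0.30 = 400M;
    // capacity beyond that is a candidate for shrinking.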
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  _capacity_at_prologue: %6.1fK"
                             "  minimum_desired_capacity: %6.1fK"
                             "  maximum_desired_capacity: %6.1fK",
                             _capacity_at_prologue / (double) K,
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
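      // Illustrative progression (hypothetical sizes): if each System.gc()
      // leaves 100M of excess capacity, successive calls shrink by 0M, 10M,
      // 40M and then 100M; a resize that does not reach this branch resets
      // the factor to 0% again.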
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      spec()->init_size() / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: " SIZE_FORMAT
                      "  new shrink factor: " SIZE_FORMAT
                      "  _min_heap_delta_bytes: %.1fK",
                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  aggressive shrinking:"
                             "  _capacity_at_prologue: %.1fK"
                             "  capacity_after_gc: %.1fK"
                             "  expansion_for_promotion: %.1fK"
                             "  shrink_bytes: %.1fK",
                             capacity_after_gc / (double) K,
                             _capacity_at_prologue / (double) K,
                             expansion_for_promotion / (double) K,
                             shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "sanity check");
}

void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
  if (VerifyBeforeGC) {
    verify_alloc_buffers_clean();
  }
}

void TenuredGeneration::gc_epilogue(bool full) {
  if (VerifyAfterGC) {
    verify_alloc_buffers_clean();
  }
  OneContigSpaceCardGeneration::gc_epilogue(full);
}


bool TenuredGeneration::should_collect(bool  full,
                                       size_t size,
                                       bool   is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  bool result = false;
  if (!result && full) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " full");
    }
  }
  if (!result && should_allocate(size, is_tlab)) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " should_allocate(" SIZE_FORMAT ")",
                    size);
    }
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (!result && free() < 10000) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " free(): " SIZE_FORMAT,
                    free());
    }
  }
  // If we had to expand to accommodate promotions from younger generations
  if (!result && _capacity_at_prologue < capacity()) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
                    _capacity_at_prologue, capacity());
    }
  }
  return result;
}

void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  retire_alloc_buffers_before_full_gc();
  OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
                                        size, is_tlab);
}

void TenuredGeneration::update_gc_stats(int current_level,
                                        bool full) {
  // If the next lower level(s) has been collected, gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Calculate size of data promoted from the younger generations
    // before doing the collection.
    size_t used_before_gc = used();

    // If the younger gen collections were skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}


#ifndef SERIALGC
oop TenuredGeneration::par_promote(int thread_num,
                                   oop old, markOop m, size_t word_sz) {

  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  HeapWord* obj_ptr = buf->allocate(word_sz);
  bool is_lab = true;
  if (obj_ptr == NULL) {
#ifndef PRODUCT
    if (Universe::heap()->promotion_should_fail()) {
      return NULL;
    }
#endif  // #ifndef PRODUCT

    // Slow path:
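    // The per-thread buffer is exhausted.  Either retire it and start a
    // fresh one (when the object is small relative to the allowed buffer
    // waste), or fall back to allocating the object directly in the
    // generation, expanding it if necessary.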
    if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
      // The object is small enough; abandon this buffer and start a new one.
      size_t buf_size = buf->word_sz();
      HeapWord* buf_space =
        TenuredGeneration::par_allocate(buf_size, false);
      if (buf_space == NULL) {
        buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
      }
      if (buf_space != NULL) {
        buf->retire(false, false);
        buf->set_buf(buf_space);
        obj_ptr = buf->allocate(word_sz);
        assert(obj_ptr != NULL, "Buffer was definitely big enough...");
      }
    }
    // Otherwise, buffer allocation failed; try allocating object
    // individually.
    if (obj_ptr == NULL) {
      obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
      if (obj_ptr == NULL) {
        obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
      }
    }
    if (obj_ptr == NULL) return NULL;
  }
  assert(obj_ptr != NULL, "program logic");
  Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
  oop obj = oop(obj_ptr);
  // Restore the mark word copied above.
  obj->set_mark(m);
  return obj;
}

void TenuredGeneration::par_promote_alloc_undo(int thread_num,
                                               HeapWord* obj,
                                               size_t word_sz) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
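  // If the object was carved out of this thread's buffer, simply un-allocate
  // it; otherwise plug the hole with a filler object so the space remains
  // parseable.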
  if (buf->contains(obj)) {
    guarantee(buf->contains(obj + word_sz - 1),
              "should contain whole object");
    buf->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void TenuredGeneration::par_promote_alloc_done(int thread_num) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
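  // End-of-GC retirement; whether the unused tail of the buffer is retained
  // for the next collection is controlled by ParallelGCRetainPLAB.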
  buf->retire(true, ParallelGCRetainPLAB);
}

void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
  if (UseParNewGC) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
    }
  }
}

// Verify that any retained parallel allocation buffers do not
// intersect with dirty cards.
void TenuredGeneration::verify_alloc_buffers_clean() {
  if (UseParNewGC) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
    }
  }
}

#else  // SERIALGC
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
void TenuredGeneration::verify_alloc_buffers_clean() {}
#endif // SERIALGC

bool TenuredGeneration::promotion_attempt_is_safe(
    size_t max_promotion_in_bytes,
    bool younger_handles_promotion_failure) const {

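  // The attempt is safe if the worst case (every byte the caller might
  // promote) fits in contiguous free space.  If the younger generation can
  // recover from promotion failure, it is enough that the padded average of
  // past promotions fits.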
  bool result = max_contiguous_available() >= max_promotion_in_bytes;

  if (younger_handles_promotion_failure && !result) {
    result = max_contiguous_available() >=
      (size_t) gc_stats()->avg_promoted()->padded_average();
    if (PrintGC && Verbose && result) {
      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
                  " contiguous_available: " SIZE_FORMAT
                  " avg_promoted: " SIZE_FORMAT,
                  max_contiguous_available(),
                  gc_stats()->avg_promoted()->padded_average());
    }
  } else {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
                  " contiguous_available: " SIZE_FORMAT
                  " promotion_in_bytes: " SIZE_FORMAT,
                  max_contiguous_available(), max_promotion_in_bytes);
    }
  }
  return result;
}