#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)tenuredGeneration.cpp        1.47 07/05/29 09:44:17 JVM"
#endif
/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_tenuredGeneration.cpp.incl"

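// Construct the tenured (old) generation: carve a TenuredSpace out of the
// reserved virtual space, set up the generation/space/collector performance
// counters, and, when ParNew is in use with parallel GC threads, allocate one
// promotion buffer (with block-offset-table updates) per GC worker thread.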
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  OneContigSpaceCardGeneration(rs, initial_byte_size,
                               MinHeapDeltaBytes, level, remset, NULL)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
#ifndef SERIALGC
  if (UseParNewGC && ParallelGCThreads > 0) {
    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
                                      ParallelGCThreads);
    if (_alloc_buffers == NULL)
      vm_exit_during_initialization("Could not allocate alloc_buffers");
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i] =
        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
      if (_alloc_buffers[i] == NULL)
        vm_exit_during_initialization("Could not allocate alloc_buffers");
    }
  } else {
    _alloc_buffers = NULL;
  }
#endif // SERIALGC
}


const char* TenuredGeneration::name() const {
  return "tenured generation";
}

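// Resize this generation after a collection.  If free space has fallen below
// MinHeapFreeRatio the generation is expanded; otherwise, if free space
// exceeds MaxHeapFreeRatio, a shrink is considered.  Shrinking is damped by
// _shrink_factor (0%, 10%, 40%, 100% over successive opportunities) so that
// an explicit System.gc() between application phases does not immediately
// give back space that will soon be needed again.  Capacity gained purely to
// absorb promotions since the prologue may also be taken back.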
void TenuredGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   free_after_gc   : %6.1fK"
                  "   used_after_gc   : %6.1fK"
                  "   capacity_after_gc   : %6.1fK",
                  free_after_gc / (double) K,
                  used_after_gc / (double) K,
                  capacity_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                  "   free_percentage: %6.2f",
                  free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fK"
                    "  expand_bytes: %6.1fK"
                    "  _min_heap_delta_bytes: %6.1fK",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    _min_heap_delta_bytes / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  _capacity_at_prologue: %6.1fK"
                             "  minimum_desired_capacity: %6.1fK"
                             "  maximum_desired_capacity: %6.1fK",
                             _capacity_at_prologue / (double) K,
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      spec()->init_size() / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: " SIZE_FORMAT
                      "  new shrink factor: " SIZE_FORMAT
                      "  _min_heap_delta_bytes: %.1fK",
                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  aggressive shrinking:"
                             "  _capacity_at_prologue: %.1fK"
                             "  capacity_after_gc: %.1fK"
                             "  expansion_for_promotion: %.1fK"
                             "  shrink_bytes: %.1fK",
                             _capacity_at_prologue / (double) K,
                             capacity_after_gc / (double) K,
                             expansion_for_promotion / (double) K,
                             shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "sanity check");
}

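// Remember the capacity and usage at the start of the collection; they are
// consulted later by compute_new_size(), should_collect() and
// update_gc_stats().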
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
  if (VerifyBeforeGC) {
    verify_alloc_buffers_clean();
  }
}

void TenuredGeneration::gc_epilogue(bool full) {
  if (VerifyAfterGC) {
    verify_alloc_buffers_clean();
  }
  OneContigSpaceCardGeneration::gc_epilogue(full);
}


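// Decide whether this generation should be collected: either a full
// collection was requested, the pending allocation cannot be satisfied here,
// very little space remains free, or the generation had to expand since the
// prologue to absorb promotions.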
bool TenuredGeneration::should_collect(bool  full,
                                       size_t size,
                                       bool   is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  bool result = false;
  if (!result && full) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " full");
    }
  }
  if (!result && should_allocate(size, is_tlab)) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " should_allocate(" SIZE_FORMAT ")",
                    size);
    }
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (!result && free() < 10000) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " free(): " SIZE_FORMAT,
                    free());
    }
  }
  // If we had to expand to accommodate promotions from younger generations
  if (!result && _capacity_at_prologue < capacity()) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
                    _capacity_at_prologue, capacity());
    }
  }
  return result;
}

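// Retire the per-thread promotion buffers before a full collection, then
// delegate to the superclass implementation.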
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  retire_alloc_buffers_before_full_gc();
  OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
                                        size, is_tlab);
}

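// After a collection of the next-younger generation, sample how many bytes
// were promoted into this generation so the promotion average stays current.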
void TenuredGeneration::update_gc_stats(int current_level,
                                        bool full) {
  // If the next lower level(s) has been collected, gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Calculate size of data promoted from the younger generations
    // before doing the collection.
    size_t used_before_gc = used();

    // If the younger gen collections were skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}


#ifndef SERIALGC
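// Parallel promotion support.  par_promote() copies an object into this
// generation on behalf of a GC worker thread: it first tries the worker's
// promotion buffer; if that fails and the object is small relative to the
// buffer, the buffer is retired and refilled; any remaining failure falls
// back to direct allocation, expanding the generation if necessary.  Returns
// NULL when no space can be found (or when promotion failure is being
// injected in non-product builds).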
oop TenuredGeneration::par_promote(int thread_num,
                                   oop old, markOop m, size_t word_sz) {

  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  HeapWord* obj_ptr = buf->allocate(word_sz);
  bool is_lab = true;
  if (obj_ptr == NULL) {
#ifndef PRODUCT
    if (Universe::heap()->promotion_should_fail()) {
      return NULL;
    }
#endif  // #ifndef PRODUCT

    // Slow path:
    if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      size_t buf_size = buf->word_sz();
      HeapWord* buf_space =
        TenuredGeneration::par_allocate(buf_size, false);
      if (buf_space == NULL) {
        buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
      }
      if (buf_space != NULL) {
        buf->retire(false, false);
        buf->set_buf(buf_space);
        obj_ptr = buf->allocate(word_sz);
        assert(obj_ptr != NULL, "Buffer was definitely big enough...");
      }
    }
    // Otherwise, buffer allocation failed; try allocating object
    // individually.
    if (obj_ptr == NULL) {
      obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
      if (obj_ptr == NULL) {
        obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
      }
    }
    if (obj_ptr == NULL) return NULL;
  }
  assert(obj_ptr != NULL, "program logic");
  Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
  oop obj = oop(obj_ptr);
  // Restore the mark word copied above.
  obj->set_mark(m);
  return obj;
}

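// Undo an allocation made by par_promote().  If the space came from the
// worker's buffer the allocation is simply rolled back; otherwise the region
// is filled with a dummy object so the heap stays parsable.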
void TenuredGeneration::par_promote_alloc_undo(int thread_num,
                                               HeapWord* obj,
                                               size_t word_sz) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  if (buf->contains(obj)) {
    guarantee(buf->contains(obj + word_sz - 1),
              "should contain whole object");
    buf->undo_allocation(obj, word_sz);
  } else {
    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
  }
}

void TenuredGeneration::par_promote_alloc_done(int thread_num) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  buf->retire(true, ParallelGCRetainPLAB);
}

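// Retire every per-thread promotion buffer ahead of a full collection so
// none of them is retained across it.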
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
  if (UseParNewGC) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
    }
  }
}

// Verify that any retained parallel allocation buffers do not
// intersect with dirty cards.
void TenuredGeneration::verify_alloc_buffers_clean() {
  if (UseParNewGC) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _rs->verify_empty(_alloc_buffers[i]->range());
    }
  }
}
#else  // SERIALGC
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
void TenuredGeneration::verify_alloc_buffers_clean() {}
#endif // SERIALGC

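// Answer whether a promotion of at most max_promotion_in_bytes is likely to
// succeed.  The conservative test requires that much contiguous free space;
// if the younger generation can handle promotion failure, it suffices that
// the padded average of recent promotions fits.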
bool TenuredGeneration::promotion_attempt_is_safe(
    size_t max_promotion_in_bytes,
    bool younger_handles_promotion_failure) const {

  bool result = max_contiguous_available() >= max_promotion_in_bytes;

  if (younger_handles_promotion_failure && !result) {
    result = max_contiguous_available() >=
      (size_t) gc_stats()->avg_promoted()->padded_average();
    if (PrintGC && Verbose && result) {
      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
                  " contiguous_available: " SIZE_FORMAT
                  " avg_promoted: " SIZE_FORMAT,
                  max_contiguous_available(),
                  (size_t) gc_stats()->avg_promoted()->padded_average());
    }
  } else {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
                  " contiguous_available: " SIZE_FORMAT
                  " promotion_in_bytes: " SIZE_FORMAT,
                  max_contiguous_available(), max_promotion_in_bytes);
    }
  }
  return result;
}