/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/space.hpp"
#include "memory/tenuredGeneration.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"

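// Construct the tenured (old) generation: carve a single TenuredSpace out of
// the committed part of the reserved space and set up the performance
// counters that track the generation's size and occupancy.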
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  CardGeneration(rs, initial_byte_size, level, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

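// Record capacity and occupancy at the start of a collection;
// should_collect() and update_gc_stats() compare against these values to
// detect expansion and to estimate promoted bytes.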
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

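// Decide whether this generation should be collected: on an explicit full
// request, when an allocation of the given size belongs here, when free
// space is nearly exhausted, or when the generation had to expand to absorb
// promotions since the prologue.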
bool TenuredGeneration::should_collect(bool  full,
                                       size_t size,
                                       bool   is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  bool result = false;
  if (!result && full) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " full");
    }
  }
  if (!result && should_allocate(size, is_tlab)) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " should_allocate(" SIZE_FORMAT ")",
                    size);
    }
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (!result && free() < 10000) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " free(): " SIZE_FORMAT,
                    free());
    }
  }
  // If we had to expand to accommodate promotions from younger generations
  if (!result && _capacity_at_prologue < capacity()) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
                    _capacity_at_prologue, capacity());
    }
  }
  return result;
}

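// Resize the generation after a collection (the sizing policy lives in
// CardGeneration::compute_new_size()); the assert checks that resizing
// neither changed the amount of used space nor shrank the capacity below it.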
void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
}

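// After a collection of the next-younger generation, sample the number of
// bytes promoted into this generation so the promotion average stays
// current.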
void TenuredGeneration::update_gc_stats(int current_level,
                                        bool full) {
  // If the next lower level(s) has been collected, gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Calculate size of data promoted from the younger generations
    // before doing the collection.
    size_t used_before_gc = used();

    // If the younger gen collections were skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

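// A promotion attempt is deemed safe if the largest contiguous free block
// can hold either the padded average of recent promotions or the worst-case
// promotion from the younger generation.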
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
      " max_promo(" SIZE_FORMAT ")",
      res ? "" : " not", available, res ? ">=" : "<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

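// Perform a stop-the-world mark-sweep-compact collection of this generation
// and everything younger, temporarily widening reference discovery to the
// whole heap and bracketing the work with timer and tracer reporting.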
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  SpecializationStats::print();
}

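// Try to satisfy an allocation by expanding the committed space. The
// parallel path serializes expansion on ParGCRareEvent_lock and retries
// until the allocation succeeds or there is no uncommitted space left to
// expand into; GCExpandToAllocateDelayMillis optionally sleeps between the
// expansion and the allocation attempt.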
HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size,
                                       bool is_tlab,
                                       bool parallel) {
  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size * HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}


void TenuredGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}


size_t TenuredGeneration::capacity() const {
  return _the_space->capacity();
}


size_t TenuredGeneration::used() const {
  return _the_space->used();
}


size_t TenuredGeneration::free() const {
  return _the_space->free();
}

MemRegion TenuredGeneration::used_region() const {
  return the_space()->used_region();
}

size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t TenuredGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

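// Grow the committed size by the given number of bytes. The card table and
// the shared block offset array are resized to cover the new region before
// the space's end is moved, since setting the end also extends the space's
// view of the (shared) block offset table.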
bool TenuredGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}


bool TenuredGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

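// Shrink the committed size by the given number of bytes, in the reverse
// order of grow_by(): first uncommit and pull the space's end (and with it
// the space's BOT) back, then shrink the shared block offset array and the
// card table to match the smaller committed region.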
void TenuredGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}

// Currently nothing to do.
void TenuredGeneration::prepare_for_verify() {}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void TenuredGeneration::space_iterate(SpaceClosure* blk,
                                      bool usedOnly) {
  blk->do_space(_the_space);
}

void TenuredGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}


void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}


bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

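// Define oop_since_save_marks_iterate##nv_suffix for every closure type in
// ALL_SINCE_SAVE_MARKS_CLOSURES: each variant iterates over the oops
// allocated since the last saved mark and then advances the saved mark,
// with nv_suffix selecting the non-virtual closure dispatch variant.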
#define TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
                                                                            \
void TenuredGeneration::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  blk->set_generation(this);                                                \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                 \
  blk->reset_generation();                                                  \
  save_marks();                                                             \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN


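// Wrap up after a collection: remember the top of the space as the
// high-water mark of this GC, refresh the performance counters, and (when
// mangling is enabled) check that the unused area was fully mangled.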
void TenuredGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}

void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}

void TenuredGeneration::verify() {
  the_space()->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("   the");
  the_space()->print_on(st);
}