/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardGeneration.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/space.hpp"
#include "memory/tenuredGeneration.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"

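// TenuredGeneration is the old (tenured) generation of the generational
// heap: a single contiguous TenuredSpace managed on top of CardGeneration's
// card-table and block-offset-table infrastructure, and collected with a
// full mark-sweep-compact (GenMarkSweep).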
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  CardGeneration(rs, initial_byte_size, level, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

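// Record capacity and usage at the start of a collection so that
// should_collect() and update_gc_stats() can compare against them later.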
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

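// Decide whether this generation needs to be collected: on an explicit
// full request, when an allocation of the given size should be satisfied
// here, when free space is nearly exhausted, or when the generation had
// to expand to absorb promotions since the prologue.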
bool TenuredGeneration::should_collect(bool  full,
                                       size_t size,
                                       bool   is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  bool result = false;
  if (!result && full) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " full");
    }
  }
  if (!result && should_allocate(size, is_tlab)) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " should_allocate(" SIZE_FORMAT ")",
                    size);
    }
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (!result && free() < 10000) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " free(): " SIZE_FORMAT,
                    free());
    }
  }
  // If we had to expand to accommodate promotions from younger generations
  if (!result && _capacity_at_prologue < capacity()) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                    " _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
                    _capacity_at_prologue, capacity());
    }
  }
  return result;
}

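// Resize the generation after a collection.  The sizing policy itself lives
// in CardGeneration::compute_new_size(); the assert checks that resizing
// leaves the amount of used space untouched and within the new capacity.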
void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
}

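// After a collection of the level immediately below this one, sample how
// many bytes were promoted into this generation (current usage minus usage
// at the prologue) for the promotion-rate average.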
void TenuredGeneration::update_gc_stats(int current_level,
                                        bool full) {
  // If the next lower level(s) have been collected, gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Calculate size of data promoted from the younger generations
    // before doing the collection.
    size_t used_before_gc = used();

    // If the younger gen collections were skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

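// A promotion attempt is considered safe if the largest contiguous free
// block can hold either the padded average of past promotions or the
// requested maximum promotion size.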
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
      " max_promo(" SIZE_FORMAT ")",
      res ? "" : " not", available, res ? ">=" : "<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

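// Perform a full, stop-the-world mark-sweep-compact collection via
// GenMarkSweep, temporarily widening reference discovery to cover the
// whole heap rather than just this generation.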
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  SpecializationStats::print();
}

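// Expand the generation and retry the allocation.  The parallel path
// serializes expansion on ParGCRareEvent_lock and loops until either the
// allocation succeeds or no further expansion is possible.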
HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size,
                                       bool is_tlab,
                                       bool parallel) {
  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size * HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

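// Take ExpandHeap_lock (via GCMutexLocker) and delegate the actual growth
// policy to CardGeneration::expand().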
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}

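// Amount that can be allocated from the currently committed space without
// triggering a GC.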
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

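// Space that could still be made available for allocation: free space in
// the committed region plus the uncommitted remainder of the reservation.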
size_t TenuredGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(ExpandHeap_lock);
}

// Currently nothing to do.
void TenuredGeneration::prepare_for_verify() {}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}

void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}

bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

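// Expands to an oop_since_save_marks_iterate##nv_suffix definition for each
// closure type: iterate over oops in objects allocated since the last saved
// mark (with the closure's generation set to this one), then advance the
// saved mark to the current top.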
#define TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
                                                                                \
void TenuredGeneration::                                                        \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN

void TenuredGeneration::gc_epilogue(bool full) {
  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}

void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  _the_space->set_top_for_allocations();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("   the");
  _the_space->print_on(st);
}