1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/serial/genMarkSweep.hpp"
  27 #include "gc/serial/tenuredGeneration.inline.hpp"
  28 #include "gc/shared/blockOffsetTable.inline.hpp"
  29 #include "gc/shared/cardGeneration.inline.hpp"
  30 #include "gc/shared/collectorCounters.hpp"
  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/genOopClosures.inline.hpp"
  33 #include "gc/shared/generationSpec.hpp"
  34 #include "gc/shared/space.hpp"
  35 #include "memory/allocation.inline.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "runtime/java.hpp"
  38 #include "utilities/macros.hpp"
  39 #if INCLUDE_ALL_GCS
  40 #include "gc/cms/parOopClosures.hpp"
  41 #endif
  42 
// Construct the tenured (old) generation on top of the reserved space.
// CardGeneration sets up the virtual space, card table and block-offset
// table; this constructor creates the single TenuredSpace covering the
// committed region and the performance counters for monitoring tools.
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     CardTableRS* remset) :
  CardGeneration(rs, initial_byte_size, remset)
{
  // The space spans exactly the committed part of the virtual space.
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;         // no shrink pending until a GC decides otherwise
  _capacity_at_prologue = 0;  // refreshed by gc_prologue() before each GC

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  // "MSC" = mark-sweep-compact, the full collector used for this generation.
  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}
  71 
  72 void TenuredGeneration::gc_prologue(bool full) {
  73   _capacity_at_prologue = capacity();
  74   _used_at_prologue = used();
  75 }
  76 
  77 bool TenuredGeneration::should_collect(bool  full,
  78                                        size_t size,
  79                                        bool   is_tlab) {
  80   // This should be one big conditional or (||), but I want to be able to tell
  81   // why it returns what it returns (without re-evaluating the conditionals
  82   // in case they aren't idempotent), so I'm doing it this way.
  83   // DeMorgan says it's okay.
  84   bool result = false;
  85   if (!result && full) {
  86     result = true;
  87     if (PrintGC && Verbose) {
  88       gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
  89                     " full");
  90     }
  91   }
  92   if (!result && should_allocate(size, is_tlab)) {
  93     result = true;
  94     if (PrintGC && Verbose) {
  95       gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
  96                     " should_allocate(" SIZE_FORMAT ")",
  97                     size);
  98     }
  99   }
 100   // If we don't have very much free space.
 101   // XXX: 10000 should be a percentage of the capacity!!!
 102   if (!result && free() < 10000) {
 103     result = true;
 104     if (PrintGC && Verbose) {
 105       gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
 106                     " free(): " SIZE_FORMAT,
 107                     free());
 108     }
 109   }
 110   // If we had to expand to accommodate promotions from the young generation
 111   if (!result && _capacity_at_prologue < capacity()) {
 112     result = true;
 113     if (PrintGC && Verbose) {
 114       gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
 115                     "_capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
 116                     _capacity_at_prologue, capacity());
 117     }
 118   }
 119   return result;
 120 }
 121 
 122 void TenuredGeneration::compute_new_size() {
 123   assert_locked_or_safepoint(Heap_lock);
 124 
 125   // Compute some numbers about the state of the heap.
 126   const size_t used_after_gc = used();
 127   const size_t capacity_after_gc = capacity();
 128 
 129   CardGeneration::compute_new_size();
 130 
 131   assert(used() == used_after_gc && used_after_gc <= capacity(),
 132          "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
 133          " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity());
 134 }
 135 
 136 void TenuredGeneration::update_gc_stats(Generation* current_generation,
 137                                         bool full) {
 138   // If the young generation has been collected, gather any statistics
 139   // that are of interest at this point.
 140   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
 141   if (!full && current_is_young) {
 142     // Calculate size of data promoted from the young generation
 143     // before doing the collection.
 144     size_t used_before_gc = used();
 145 
 146     // If the young gen collection was skipped, then the
 147     // number of promoted bytes will be 0 and adding it to the
 148     // average will incorrectly lessen the average.  It is, however,
 149     // also possible that no promotion was needed.
 150     if (used_before_gc >= _used_at_prologue) {
 151       size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
 152       gc_stats()->avg_promoted()->sample(promoted_in_bytes);
 153     }
 154   }
 155 }
 156 
 157 void TenuredGeneration::update_counters() {
 158   if (UsePerfData) {
 159     _space_counters->update_all();
 160     _gen_counters->update_all();
 161   }
 162 }
 163 
 164 bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 165   size_t available = max_contiguous_available();
 166   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 167   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 168   if (PrintGC && Verbose) {
 169     gclog_or_tty->print_cr(
 170       "Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
 171       "max_promo(" SIZE_FORMAT ")",
 172       res? "":" not", available, res? ">=":"<",
 173       av_promo, max_promotion_in_bytes);
 174   }
 175   return res;
 176 }
 177 
// Perform a stop-the-world mark-sweep-compact collection of this
// generation via GenMarkSweep.  The size/is_tlab parameters of the
// request are not consulted here; allocation is retried by the caller.
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  // (restored automatically when 'x' goes out of scope).
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  // Bracket the collection with timer and tracer events so GC logging
  // and JFR-style reporting see matched start/end pairs.
  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  // The actual collection; must run at a safepoint.
  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
}
 201 
 202 HeapWord*
 203 TenuredGeneration::expand_and_allocate(size_t word_size,
 204                                        bool is_tlab,
 205                                        bool parallel) {
 206   assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
 207   if (parallel) {
 208     MutexLocker x(ParGCRareEvent_lock);
 209     HeapWord* result = NULL;
 210     size_t byte_size = word_size * HeapWordSize;
 211     while (true) {
 212       expand(byte_size, _min_heap_delta_bytes);
 213       if (GCExpandToAllocateDelayMillis > 0) {
 214         os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
 215       }
 216       result = _the_space->par_allocate(word_size);
 217       if ( result != NULL) {
 218         return result;
 219       } else {
 220         // If there's not enough expansion space available, give up.
 221         if (_virtual_space.uncommitted_size() < byte_size) {
 222           return NULL;
 223         }
 224         // else try again
 225       }
 226     }
 227   } else {
 228     expand(word_size*HeapWordSize, _min_heap_delta_bytes);
 229     return _the_space->allocate(word_size);
 230   }
 231 }
 232 
// Grow the committed size by at least 'bytes' (rounded up per the shared
// policy, with 'expand_bytes' as the minimum delta), holding the
// expansion lock around the shared card-generation implementation.
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}
 237 
// Upper bound on an allocation that could succeed without triggering a
// GC: the free space currently in the (committed) space.
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}
 241 
 242 size_t TenuredGeneration::contiguous_available() const {
 243   return _the_space->free() + _virtual_space.uncommitted_size();
 244 }
 245 
// Size changes (expand/shrink) must only happen while ExpandHeap_lock is
// held or at a safepoint; debug-only check.
void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(ExpandHeap_lock);
}
 249 
// Currently nothing to do before verification for this generation.
void TenuredGeneration::prepare_for_verify() {}
 252 
// Apply blk to every object in this generation's single space.
void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}
 256 
// Record the current allocation point; "since save marks" iteration then
// visits only objects allocated after this call.
void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}
 260 
// Discard the saved mark recorded by save_marks().
void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}
 264 
// True iff the saved mark still coincides with the allocation top, i.e.
// nothing has been allocated in this generation since save_marks().
bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}
 268 
// Generates one TenuredGeneration::oop_since_save_marks_iterate<suffix>
// definition per (closure type, suffix) pair supplied by
// ALL_SINCE_SAVE_MARKS_CLOSURES.  Each variant applies the closure to the
// objects allocated since the last save_marks() and then advances the
// saved mark to the current top.  (Comments are kept outside the macro
// body: a // comment on a continuation line would swallow the backslash.)
#define TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)     \
                                                                                \
void TenuredGeneration::                                                        \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->assert_generation(this);                                                 \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  save_marks();                                                                 \
}

// Instantiate the definitions for all registered closure types.
ALL_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN
 281 
// Post-collection bookkeeping for this generation.
void TenuredGeneration::gc_epilogue(bool full) {
  // update the generation and space performance counters
  update_counters();
  // With heap zapping enabled, check that the unused portion of the
  // space is still completely mangled.
  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}
 289 
// Record the space's current top as the allocation high-water mark used
// by the mangled-unused-area checks; only valid when zapping is enabled.
void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  _the_space->set_top_for_allocations();
}
 294 
// Delegate heap verification to the underlying space.
void TenuredGeneration::verify() {
  _the_space->verify();
}
 298 
// Print the generation summary, then the single space it manages.
void TenuredGeneration::print_on(outputStream* st)  const {
  Generation::print_on(st);
  st->print("   the");
  _the_space->print_on(st);
}