/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     CardTableRS* remset) :
  CardGeneration(rs, initial_byte_size, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // Initialize performance counters.
  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation counters -- generation 1, 1 subspace.
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

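// Record capacity and usage at the start of a GC; should_collect() and
// update_gc_stats() compare against these values after the collection.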
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

bool TenuredGeneration::should_collect(bool   full,
                                       size_t size,
                                       bool   is_tlab) {
  // This could be one big disjunction (||), but we want to be able to tell
  // why it returns what it returns (without re-evaluating the conditions,
  // in case they aren't idempotent), so each check is separate.
  // DeMorgan says it's okay.
  if (full) {
    log_trace(gc)("TenuredGeneration::should_collect: because full");
    return true;
  }
  if (should_allocate(size, is_tlab)) {
    log_trace(gc)("TenuredGeneration::should_collect: because should_allocate(" SIZE_FORMAT ")", size);
    return true;
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (free() < 10000) {
    log_trace(gc)("TenuredGeneration::should_collect: because free(): " SIZE_FORMAT, free());
    return true;
  }
  // If we had to expand to accommodate promotions from the young generation.
  if (_capacity_at_prologue < capacity()) {
    log_trace(gc)("TenuredGeneration::should_collect: because _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
        _capacity_at_prologue, capacity());
    return true;
  }

  return false;
}

void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity());
}

void TenuredGeneration::update_gc_stats(Generation* current_generation,
                                        bool full) {
  // If the young generation has just been collected, gather any statistics
  // that are of interest at this point.
  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
  if (!full && current_is_young) {
    // Calculate the size of the data promoted from the young generation
    // during that collection.
    size_t used_before_gc = used();

    // If the young gen collection was skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

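// Refresh the space and generation performance counters when performance
// data collection is enabled.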
void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

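// A promotion attempt is considered safe if the largest contiguous free
// block can absorb either the padded average of past promotions or the
// caller-supplied maximum promotion size.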
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);

  log_trace(gc)("Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
    res ? "" : " not", available, res ? ">=" : "<", av_promo, max_promotion_in_bytes);

  return res;
}

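// Perform a full collection of this generation by delegating to the serial
// mark-sweep-compact code, bracketed by timer and tracer bookkeeping and
// the pre-/post-collection heap dumps.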
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation.
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  gch->pre_full_gc_dump(gc_timer);

  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);

  gch->post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
}

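// Grow the committed part of the generation and retry the allocation.
// The parallel path takes ParGCRareEvent_lock and loops, since another
// thread may consume the newly expanded space before we can allocate
// from it; it gives up once no uncommitted space remains to expand into.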
HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size,
                                       bool is_tlab,
                                       bool parallel) {
  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // Otherwise try again.
      }
    }
  } else {
    expand(word_size * HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

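// Take ExpandHeap_lock (unless at a safepoint, where GCMutexLocker skips
// locking) before growing the committed space; the actual expansion is
// done by CardGeneration.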
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}

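// unsafe_max_alloc_nogc() reports only the free space already committed in
// the space; contiguous_available() also counts the uncommitted part of
// the virtual space that the generation could still expand into.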
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t TenuredGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(ExpandHeap_lock);
}

// Currently nothing to do.
void TenuredGeneration::prepare_for_verify() {}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

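// The space's "saved mark" records the value of top() at the last
// save_marks() call; it is used to iterate over oops in objects allocated
// since then and to detect that no such allocations have happened.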
void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}

void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}

bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

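// Define oop_since_save_marks_iterate##nv_suffix for each closure type:
// apply the closure to oops in objects allocated since the last saved
// mark, with the closure's generation set to this one for the duration,
// then advance the saved mark to the current top.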
#define TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)     \
                                                                                \
void TenuredGeneration::                                                        \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN

void TenuredGeneration::gc_epilogue(bool full) {
  // Update the generation and space performance counters.
  update_counters();
  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}

void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  _the_space->set_top_for_allocations();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("   the");
  _the_space->print_on(st);
}