/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

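// Construct the tenured (old) generation over the committed part of the
// reservation: create the TenuredSpace backed by the block offset table
// (_bts), then set up GC statistics and the performance counters ("MSC"
// names the mark-sweep-compact collector).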
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     CardTableRS* remset) :
  CardGeneration(rs, initial_byte_size, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // Initialize performance counters.

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

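// Record capacity and usage at the start of a GC; should_collect() and
// update_gc_stats() compare against these to detect promotion-driven
// expansion and to measure how much was promoted.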
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

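// Decide whether this generation should be collected: always for an
// explicit full GC, otherwise when an allocation of the given size belongs
// here, when free space is nearly exhausted, or when the generation had to
// expand since the prologue to absorb promotions.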
bool TenuredGeneration::should_collect(bool   full,
                                       size_t size,
                                       bool   is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  if (full) {
    log_trace(gc)("TenuredGeneration::should_collect: because full");
    return true;
  }
  if (should_allocate(size, is_tlab)) {
    log_trace(gc)("TenuredGeneration::should_collect: because should_allocate(" SIZE_FORMAT ")", size);
    return true;
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (free() < 10000) {
    log_trace(gc)("TenuredGeneration::should_collect: because free(): " SIZE_FORMAT, free());
    return true;
  }
  // If we had to expand to accommodate promotions from the young generation.
  if (_capacity_at_prologue < capacity()) {
    log_trace(gc)("TenuredGeneration::should_collect: because _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
        _capacity_at_prologue, capacity());
    return true;
  }

  return false;
}

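// Resize the generation after a full collection. The grow/shrink policy
// lives in CardGeneration::compute_new_size(); the assert checks that
// resizing never changes the amount of used space.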
void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity());
}

void TenuredGeneration::update_gc_stats(Generation* current_generation,
                                        bool full) {
  // If the young generation has been collected, gather any statistics
  // that are of interest at this point.
  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
  if (!full && current_is_young) {
    // Calculate size of data promoted from the young generation
    // before doing the collection.
    size_t used_before_gc = used();

    // If the young gen collection was skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

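// Refresh the jstat/perf counters for this generation and its space.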
void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

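// A promotion attempt is considered safe if the largest chunk available
// for contiguous allocation can absorb either the padded average promoted
// per young collection or the worst case, max_promotion_in_bytes.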
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);

  log_trace(gc)("Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
    res ? "" : " not", available, res ? ">=" : "<", av_promo, max_promotion_in_bytes);

  return res;
}

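// Perform a full collection of the entire heap via the serial
// mark-sweep-compact collector, bracketed by timer/tracer bookkeeping and
// the pre-/post-GC dump hooks.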
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation.
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  gch->pre_full_gc_dump(gc_timer);

  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);

  gch->post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
}

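// Grow the generation and retry the allocation. On the parallel path a
// lock serializes this rare event and the loop keeps expanding until the
// allocation succeeds or no uncommitted space remains; the optional delay
// (GCExpandToAllocateDelayMillis) stretches the window between expansion
// and allocation.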
HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size,
                                       bool is_tlab,
                                       bool parallel) {
  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size * HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

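// Expansion itself is shared with CardGeneration; all we add is the locking.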
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}

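// The largest allocation that could be satisfied without collecting:
// whatever is free in the single contiguous space.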
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

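// Free space in the space plus anything still uncommitted in the
// reservation, since the generation can expand into the latter.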
size_t TenuredGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(ExpandHeap_lock);
}

// Currently nothing to do.
void TenuredGeneration::prepare_for_verify() {}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

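// Saved-mark support: remember the current allocation top so that a later
// iteration can visit just the objects promoted or allocated since then.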
void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}

void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}

bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

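// Instantiate oop_since_save_marks_iterate##nv_suffix for each closure type
// named by ALL_SINCE_SAVE_MARKS_CLOSURES. As an illustrative sketch (the
// exact closure/suffix pairs come from genOopClosures.hpp), a pair such as
// (FilteringClosure, _nv) would expand roughly to:
//
//   void TenuredGeneration::oop_since_save_marks_iterate_nv(FilteringClosure* blk) {
//     blk->set_generation(this);
//     _the_space->oop_since_save_marks_iterate_nv(blk);
//     blk->reset_generation();
//     save_marks();
//   }
//
// i.e. point the closure at this generation, walk the oops in objects
// allocated since the last saved mark, then advance the saved mark.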
#define TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)     \
                                                                                \
void TenuredGeneration::                                                        \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN

void TenuredGeneration::gc_epilogue(bool full) {
  // Update the generation and space performance counters.
  update_counters();
  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}

void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  _the_space->set_top_for_allocations();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("   the");
  _the_space->print_on(st);
}