/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_CMSGC
#include "gc/cms/parOopClosures.hpp"
#endif

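// Construct the single tenured (old) generation: one contiguous
// TenuredSpace covering the committed part of the reserved region,
// plus the GC statistics and performance counters read by the rest
// of the collector.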
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     CardTableRS* remset) :
  CardGeneration(rs, initial_byte_size, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // Initialize performance counters.

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation counters -- generation 1, 1 subspace.
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

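// Record the committed and used sizes at the start of a collection;
// should_collect() and update_gc_stats() compare against these values
// to detect expansion and promotion that happened during the collection.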
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

bool TenuredGeneration::should_collect(bool  full,
                                       size_t size,
                                       bool   is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  if (full) {
    log_trace(gc)("TenuredGeneration::should_collect: because full");
    return true;
  }
  if (should_allocate(size, is_tlab)) {
    log_trace(gc)("TenuredGeneration::should_collect: because should_allocate(" SIZE_FORMAT ")", size);
    return true;
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (free() < 10000) {
    log_trace(gc)("TenuredGeneration::should_collect: because free(): " SIZE_FORMAT, free());
    return true;
  }
  // If we had to expand to accommodate promotions from the young generation.
  if (_capacity_at_prologue < capacity()) {
    log_trace(gc)("TenuredGeneration::should_collect: because capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
        _capacity_at_prologue, capacity());
    return true;
  }

  return false;
}

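// The resizing policy itself lives in CardGeneration; this wrapper
// only verifies that shrinking or growing the committed space did not
// change the amount of used space.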
void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity());
}

void TenuredGeneration::update_gc_stats(Generation* current_generation,
                                        bool full) {
  // If the young generation has been collected, gather any statistics
  // that are of interest at this point.
  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
  if (!full && current_is_young) {
    // Calculate the size of data promoted from the young generation
    // before doing the collection.
    size_t used_before_gc = used();

    // If the young gen collection was skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

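// Refresh the space and generation performance counters (visible via
// tools such as jstat) when performance data collection is enabled.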
void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

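// A promotion attempt is considered safe if the largest contiguous
// free block can hold either the padded average of past promotions or
// the worst case, max_promotion_in_bytes.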
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);

  log_trace(gc)("Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
    res ? "" : " not", available, res ? ">=" : "<", av_promo, max_promotion_in_bytes);

  return res;
}

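// Perform a full, stop-the-world mark-compact collection via
// GenMarkSweep, with reference discovery temporarily widened to span
// the whole heap.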
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation.
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  gch->pre_full_gc_dump(gc_timer);

  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);

  gch->post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
}

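// Try to satisfy an allocation request by expanding the committed
// space and retrying. Parallel callers serialize on
// ParGCRareEvent_lock and loop, since another thread may have taken
// the newly committed space; the serial path expands and allocates
// exactly once.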
HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size,
                                       bool is_tlab,
                                       bool parallel) {
  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // Else try again.
      }
    }
  } else {
    expand(word_size * HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

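// Expansion can be requested outside a safepoint (e.g. to accommodate
// promotion), so take ExpandHeap_lock; GCMutexLocker skips the lock
// when already at a safepoint.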
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}

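// The largest allocation that could be satisfied without either
// collecting or expanding the committed space.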
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

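// Free space in the space plus whatever is still uncommitted in the
// reserved region; with a single contiguous space, all of it could in
// principle serve one allocation.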
size_t TenuredGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(ExpandHeap_lock);
}

// Currently nothing to do.
void TenuredGeneration::prepare_for_verify() {}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

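// The saved mark records the top of the space; objects allocated or
// promoted afterwards lie above it, so young-generation collections
// can limit their scanning to the newly added objects.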
void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}

void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}

bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

void TenuredGeneration::gc_epilogue(bool full) {
  // Update the generation and space performance counters.
  update_counters();
  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}

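// Note the current top of the space so that the unused (mangled) area
// above it can later be checked for stray writes; only meaningful
// when ZapUnusedHeapArea is set.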
void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  _the_space->set_top_for_allocations();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("   the");
  _the_space->print_on(st);