/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"
class AdjoiningGenerations;
class CollectorPolicy;
class GCHeapSummary;
class GCTaskManager;
class GenerationSizer;
class PSAdaptiveSizePolicy;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters*   _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  static GCTaskManager*          _gc_task_manager;      // The task manager.

  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    _death_march_count = 0;
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }
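  // Note: the constructor rounds both generation alignments up to
  // intra_heap_alignment() via set_alignment() (defined at the end of
  // this header).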

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return intra_heap_alignment();
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen()     { return _young_gen; }
  static PSOldGen* old_gen()         { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

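  // Accessor for the single ParallelScavengeHeap instance. A minimal,
  // hypothetical call site (illustrative only, not part of this header):
  //
  //   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  //   size_t used_bytes = heap->used();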
  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();
  // The alignment used for the various generations.
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()  const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen,
  // and for the boundary between the young gen and the old gen.
  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
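  // For reference (an assumption about the build, not asserted here):
  // with K = 1024 and an 8-byte HeapWord on a typical LP64 platform,
  // this works out to 64 * 1024 * 8 bytes = 512 KB.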

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);        // reserved part
  bool is_in_old(oop p);          // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections,
  // causing a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);
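  // A hypothetical caller sketch (illustrative only; the real call paths
  // live in the VM runtime, not in this header):
  //
  //   bool limit_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(word_count, &limit_exceeded);
  //   if (obj == NULL && limit_exceeded) {
  //     // the adaptive size policy hit the GC overhead limit
  //   }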

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This should also be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // Collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
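  // Note: when UseNUMA is set, inline contiguous allocation is not
  // supported, so top_addr() and end_addr() return the (HeapWord**)-1
  // sentinel instead of real bump-pointer addresses.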

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
  public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
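  // A hypothetical usage sketch (illustrative only):
  //
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope psrs;
  //     // ... process strong roots ...
  //   }   // leaving the block ends the scope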
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}
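// Worked example (assuming the 512 KB intra_heap_alignment noted above):
// set_alignment(var, 4 * K) passes the power-of-two assert, rounds 4 KB up
// to 512 KB via round_to(), stores 512 KB in var, and returns it.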

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP