/*
 * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class AdjoiningGenerations;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;
  static PSPermGen*  _perm_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters*   _gc_policy_counters;

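  // The singleton instance of this heap, set up during initialize() and
  // returned by heap().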
  static ParallelScavengeHeap* _psh;

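  // Alignments used when laying out the generations; all are set from
  // intra_heap_alignment() in the constructor.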
  size_t _perm_gen_alignment;
  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

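  // The generation sizing policy, also exposed via collector_policy() below.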
  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;

  static GCTaskManager*          _gc_task_manager;      // The task manager.

 protected:
  static inline size_t total_invocations();
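  // Allocate the backing memory for a new thread-local allocation buffer
  // (TLAB); returns NULL if the allocation fails.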
  HeapWord* allocate_new_tlab(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    set_alignment(_perm_gen_alignment, intra_heap_alignment());
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
  // GenerationSizer* collector_policy() const { return _collector_policy; }

  static PSYoungGen* young_gen()     { return _young_gen; }
  static PSOldGen* old_gen()         { return _old_gen; }
  static PSPermGen* perm_gen()       { return _perm_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
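  // Update the generations' performance counters.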
  void update_counters();
  // The alignment used for the various generations.
  size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()  const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for boundary between young gen and old gen.
  size_t intra_heap_alignment() const { return 64 * K; }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t permanent_capacity() const;
  size_t permanent_used() const;

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;
  bool is_in_permanent(const void *p) const {    // reserved part
    return perm_gen()->reserved().contains(p);
  }

  bool is_permanent(const void *p) const {    // committed part
    return perm_gen()->is_in(p);
  }

  inline bool is_in_young(oop p);        // reserved part
  inline bool is_in_old_or_perm(oop p);  // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will be set
  // to true if the adaptive size policy determines that an excessive
  // amount of time is being spent doing collections, causing a NULL
  // to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.

  HeapWord* mem_allocate(size_t size,
                         bool is_noref,
                         bool is_tlab,
                         bool* gc_overhead_limit_was_exceeded);
  HeapWord* failed_mem_allocate(size_t size, bool is_tlab);

  HeapWord* permanent_mem_allocate(size_t size);
  HeapWord* failed_permanent_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.  The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();
  inline void invoke_full_gc(bool maximum_compaction);

  size_t large_typearray_limit() { return FastAllocateSizeLimit; }

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

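  // An estimate of the largest allocation that could currently be satisfied
  // without a collection ("unsafe" because it is not protected against
  // concurrent allocation).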
  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    return true;
  }

  void oop_iterate(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
  void permanent_oop_iterate(OopClosure* cl);
  void permanent_object_iterate(ObjectClosure* cl);

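  // Block queries: the start of the block containing addr, its size in
  // HeapWords, and whether that block is an object.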
  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool allow_dirty, bool silent, bool /* option */);

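  // Print the change in heap usage relative to prev_used (used in GC log
  // output).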
  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
  public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}