/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class AdjoiningGenerations;
class GCTaskManager;
class PSAdaptiveSizePolicy;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;
  static PSPermGen*  _perm_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _perm_gen_alignment;
  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;

  static GCTaskManager* _gc_task_manager;  // The task manager.

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    set_alignment(_perm_gen_alignment, intra_heap_alignment());
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  CollectedHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  static PSYoungGen* young_gen()     { return _young_gen; }
  static PSOldGen* old_gen()         { return _old_gen; }
  static PSPermGen* perm_gen()       { return _perm_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }
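
  // Usage sketch (illustrative, not part of this interface): VM-internal
  // code reaches this heap through the static accessor rather than by
  // passing pointers around, e.g.:
  //
  //   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  //   PSYoungGen* young = heap->young_gen();
  //
  // heap() is expected to return the singleton stored in _psh once
  // initialize() has run.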

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();
  // The alignment used for the various generations.
  size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()   const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for the boundary between young gen and old gen.
  size_t intra_heap_alignment() const { return 64 * K; }
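
  // Worked example (illustrative): with the HotSpot constant K = 1024,
  // intra_heap_alignment() returns 64 * 1024 = 65536 bytes.  The
  // constructor above therefore sets all three generation alignments to
  // 64K via set_alignment(), which rounds its argument up to a multiple
  // of this value (see the definition at the end of this file).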

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t permanent_capacity() const;
  size_t permanent_used() const;

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;
  bool is_in_permanent(const void *p) const {    // reserved part
    return perm_gen()->reserved().contains(p);
  }

  bool is_permanent(const void *p) const {    // committed part
    return perm_gen()->is_in(p);
  }
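
  // Note (illustrative distinction): "reserved" is the whole address range
  // set aside for the perm gen, while "committed" is only the part currently
  // backed by memory.  Hence is_in_permanent(p) can be true while
  // is_permanent(p) is false for an address above the committed high-water
  // mark.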

  inline bool is_in_young(oop p);        // reserved part
  inline bool is_in_old_or_perm(oop p);  // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determined that
  // an excessive amount of time was being spent on collections
  // and therefore caused NULL to be returned.  If NULL is not
  // returned, "gc_overhead_limit_was_exceeded" has an undefined
  // meaning.

  HeapWord* mem_allocate(size_t size,
                         bool is_noref,
                         bool is_tlab,
                         bool* gc_overhead_limit_was_exceeded);
  HeapWord* failed_mem_allocate(size_t size, bool is_tlab);

  HeapWord* permanent_mem_allocate(size_t size);
  HeapWord* failed_permanent_mem_allocate(size_t size);
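
  // Usage sketch (illustrative only; the real allocation paths in the VM
  // are more involved): a caller passes the address of a flag and treats
  // a NULL result with the flag set as a GC-overhead failure:
  //
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(word_size,
  //                                      false,  // is_noref
  //                                      false,  // is_tlab
  //                                      &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL && gc_overhead_limit_was_exceeded) {
  //     // the size policy judged collection overhead excessive; callers
  //     // typically report an out-of-memory condition here
  //   }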

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.  The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();
  inline void invoke_full_gc(bool maximum_compaction);
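
  // Call pattern (a sketch under the stated preconditions, not the actual
  // caller code): at a safepoint, in the vm thread,
  //
  //   if (/* only a young collection is wanted */) {
  //     heap->invoke_scavenge();      // may fall back to a full gc
  //   } else {
  //     heap->invoke_full_gc(true /* maximum_compaction */);
  //   }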

  size_t large_typearray_limit() { return FastAllocateSizeLimit; }

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }
  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    return true;
  }

  void oop_iterate(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
  void permanent_oop_iterate(OopClosure* cl);
  void permanent_object_iterate(ObjectClosure* cl);

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool allow_dirty, bool silent, bool /* option */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Try to shrink the heap based on the free ratio
  bool try_to_shrink_by_free_ratio(bool isFullGC);

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
  public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
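
  // Usage sketch (illustrative): the scope is stack-allocated around
  // strong-root processing, e.g.:
  //
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope psrs;
  //     // ... process strong roots ...
  //   }  // destructor closes the scope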
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}
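
// Worked example (illustrative): round_to() rounds its first argument up to
// a multiple of the second.  With intra_heap_alignment() == 64K:
//
//   set_alignment(var, 64 * K);   // var becomes 64K (already a multiple)
//   set_alignment(var, 16 * K);   // 16K is a power of 2; rounds up to 64K
//
// A val that is not a power of two would trip the assert in debug builds.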