1 #ifdef USE_PRAGMA_IDENT_HDR
   2 #pragma ident "@(#)parallelScavengeHeap.hpp     1.62 07/10/04 10:49:30 JVM"
   3 #endif
   4 /*
   5  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 class AdjoiningGenerations;
  29 class GCTaskManager;
  30 class PSAdaptiveSizePolicy;
  31 
class ParallelScavengeHeap : public CollectedHeap {
  // VMStructs inspects these fields by name and offset; do not reorder
  // or rename members without updating the serviceability agent tables.
  friend class VMStructs;
 private:
  // The generations are statics: there is a single ParallelScavengeHeap
  // per VM (see _psh and heap() below).
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;
  static PSPermGen*  _perm_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters*   _gc_policy_counters;

  // The one-and-only instance, handed out by heap().
  // NOTE(review): presumably set during initialize() — defined elsewhere.
  static ParallelScavengeHeap* _psh;

  // Per-generation alignments.  The constructor initializes all three
  // from intra_generation_alignment() via set_alignment().
  size_t _perm_gen_alignment;
  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  // Rounds val up to intra_generation_alignment(), stores the result in
  // var and returns it.  Asserts that val is a power of 2.
  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;

  static GCTaskManager*          _gc_task_manager;      // The task manager.

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);
  void fill_all_tlabs(bool retire);

 public:
  // All three generation alignments start out equal; see
  // intra_generation_alignment() below for the common value.
  ParallelScavengeHeap() : CollectedHeap() {
    set_alignment(_perm_gen_alignment, intra_generation_alignment());
    set_alignment(_young_gen_alignment, intra_generation_alignment());
    set_alignment(_old_gen_alignment, intra_generation_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  // Identifies this heap kind to generic CollectedHeap clients.
  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  static PSYoungGen* young_gen()     { return _young_gen; }
  static PSOldGen* old_gen()         { return _old_gen; }
  static PSPermGen* perm_gen()       { return _perm_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  // Accessor for the singleton instance (_psh); defined elsewhere.
  static ParallelScavengeHeap* heap();

  // NOTE: the top-level const on the returned pointer has no effect for
  // callers (by-value return); kept as-is for interface stability.
  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various generations.
  size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()  const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen.
  size_t intra_generation_alignment() const { return 64 * K; }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t permanent_capacity() const;
  size_t permanent_used() const;

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;
  bool is_in_permanent(const void *p) const {    // reserved part
    return perm_gen()->reserved().contains(p);
  }

  bool is_permanent(const void *p) const {    // committed part
    return perm_gen()->is_in(p);
  }

  static bool is_in_young(oop *p);        // reserved part
  static bool is_in_old_or_perm(oop *p);  // reserved part

  // Memory allocation.   "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and caused a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.

  HeapWord* mem_allocate(size_t size, 
                         bool is_noref, 
                         bool is_tlab,
                         bool* gc_overhead_limit_was_exceeded);
  // Fallback allocation path, tried after mem_allocate fails.
  HeapWord* failed_mem_allocate(size_t size, bool is_tlab);

  HeapWord* permanent_mem_allocate(size_t size);
  HeapWord* failed_permanent_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  // 
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.  The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();
  inline void invoke_full_gc(bool maximum_compaction);

  size_t large_typearray_limit() { return FastAllocateSizeLimit; }

  // Inline contiguous allocation is not supported with NUMA-aware
  // allocation; top_addr()/end_addr() report NULL in that case.
  bool supports_inline_contig_alloc() const { return !UseNUMA; }
  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : NULL; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : NULL; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Iteration over all oops / objects, including the permanent
  // generation variants below.
  void oop_iterate(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void permanent_oop_iterate(OopClosure* cl);
  void permanent_object_iterate(ObjectClosure* cl);

  // Block queries for heap inspection/verification.
  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool allow_dirty, bool silent);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);
};
 219 
 220 inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
 221 {
 222   assert(is_power_of_2((intptr_t)val), "must be a power of 2");
 223   var = round_to(val, intra_generation_alignment());
 224   return var;
 225 }