
src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp

rev 50095 : [mq]: allocations-rt.patch


 161   // the young gen.
 162   virtual bool is_scavengable(oop obj);
 163   virtual void register_nmethod(nmethod* nm);
 164   virtual void verify_nmethod(nmethod* nmethod);
 165 
 166   size_t max_capacity() const;
 167 
 168   // Whether p is in the allocated part of the heap
 169   bool is_in(const void* p) const;
 170 
 171   bool is_in_reserved(const void* p) const;
 172 
 173   bool is_in_young(oop p);  // reserved part
 174   bool is_in_old(oop p);    // reserved part
 175 
 176   // Memory allocation. "gc_overhead_limit_was_exceeded" will be set to
 177   // true if the adaptive size policy determines that an excessive amount
 178   // of time is being spent doing collections, which caused a NULL to be
 179   // returned. If a NULL is not returned, "gc_overhead_limit_was_exceeded"
 180   // has an undefined meaning.
 181   HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
 182 
 183   // Allocation attempt(s) during a safepoint. It should never be called
 184   // to allocate a new TLAB as this allocation might be satisfied out
 185   // of the old generation.
 186   HeapWord* failed_mem_allocate(size_t size);
 187 
 188   // Support for System.gc()
 189   void collect(GCCause::Cause cause);
 190 
 191   // These should also be called by the VM thread at a safepoint (e.g., from a
 192   // VM operation).
 193   //
 194   // invoke_scavenge() collects the young generation only, unless the scavenge
 195   // fails; it will then attempt a full gc.  do_full_collection() below collects
 196   // the entire heap; if clear_all_soft_refs is true, it will also clear all
 197   // soft references.
 198   inline void invoke_scavenge();
 199 
 200   // Perform a full collection
 201   virtual void do_full_collection(bool clear_all_soft_refs);




 161   // the young gen.
 162   virtual bool is_scavengable(oop obj);
 163   virtual void register_nmethod(nmethod* nm);
 164   virtual void verify_nmethod(nmethod* nmethod);
 165 
 166   size_t max_capacity() const;
 167 
 168   // Whether p is in the allocated part of the heap
 169   bool is_in(const void* p) const;
 170 
 171   bool is_in_reserved(const void* p) const;
 172 
 173   bool is_in_young(oop p);  // reserved part
 174   bool is_in_old(oop p);    // reserved part
 175 
 176   // Memory allocation. "gc_overhead_limit_was_exceeded" will be set to
 177   // true if the adaptive size policy determines that an excessive amount
 178   // of time is being spent doing collections, which caused a NULL to be
 179   // returned. If a NULL is not returned, "gc_overhead_limit_was_exceeded"
 180   // has an undefined meaning.
 181   HeapWord* mem_allocate(size_t size,  Klass* klass, Thread* thread,
 182                          bool* gc_overhead_limit_was_exceeded);
 183 
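
A minimal caller sketch (not part of the file under review), assuming a HotSpot build environment: it shows how the extra klass and thread arguments of the updated signature and the overhead flag might be used together. The helper name allocate_or_report is hypothetical.

    #include "gc/parallel/parallelScavengeHeap.hpp"
    #include "oops/klass.hpp"
    #include "runtime/thread.hpp"

    // Hypothetical helper: allocate word_size words for an instance of klass
    // on behalf of the current thread and check the GC-overhead flag.
    static HeapWord* allocate_or_report(size_t word_size, Klass* klass) {
      ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
      bool gc_overhead_limit_was_exceeded = false;
      HeapWord* obj = heap->mem_allocate(word_size, klass, Thread::current(),
                                         &gc_overhead_limit_was_exceeded);
      if (obj == NULL && gc_overhead_limit_was_exceeded) {
        // The adaptive size policy decided too much time is being spent in
        // collections; callers typically surface this as an OutOfMemoryError
        // with the "GC overhead limit exceeded" message.
      }
      return obj;
    }
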
 184   // Allocation attempt(s) during a safepoint. It should never be called
 185   // to allocate a new TLAB as this allocation might be satisfied out
 186   // of the old generation.
 187   HeapWord* failed_mem_allocate(size_t size);
 188 
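
A hedged sketch of the safepoint-side use, modeled on the usual VM-operation pattern for a failed allocation; the operation name and its fields are illustrative, not taken from this patch.

    #include "gc/parallel/parallelScavengeHeap.hpp"

    // Hypothetical VM operation body: doit() runs in the VM thread at a
    // safepoint, so no TLAB is requested and the allocation may legitimately
    // be satisfied from the old generation.
    void VM_ExampleFailedAllocation::doit() {
      ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
      _result = heap->failed_mem_allocate(_word_size);
      // A NULL _result is handed back to the requesting mutator thread,
      // which decides whether to retry or throw OutOfMemoryError.
    }
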
 189   // Support for System.gc()
 190   void collect(GCCause::Cause cause);
 191 
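
For illustration only (assumed call site, not from this patch), an explicit GC request such as System.gc() reaches this entry point with the corresponding cause:

    // GCCause::_java_lang_system_gc is the cause value used for explicit
    // System.gc() requests.
    ParallelScavengeHeap::heap()->collect(GCCause::_java_lang_system_gc);
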
 192   // These should also be called by the VM thread at a safepoint (e.g., from a
 193   // VM operation).
 194   //
 195   // invoke_scavenge() collects the young generation only, unless the scavenge
 196   // fails; it will then attempt a full gc.  do_full_collection() below collects
 197   // the entire heap; if clear_all_soft_refs is true, it will also clear all
 198   // soft references.
 199   inline void invoke_scavenge();
 200 
 201   // Perform a full collection
 202   virtual void do_full_collection(bool clear_all_soft_refs);
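
A short sketch of how these two safepoint entry points might be driven (assumed VM-thread context, e.g. inside a VM operation's doit(); not code from this patch):

    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    // Young-generation collection; falls back to a full gc if the scavenge fails.
    heap->invoke_scavenge();
    // Unconditional full collection, here without forcing soft references clear.
    heap->do_full_collection(false /* clear_all_soft_refs */);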

