
src/share/vm/gc/shared/genCollectedHeap.hpp

rev 12906 : [mq]: gc_interface


   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
  27 
  28 #include "gc/shared/adaptiveSizePolicy.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectorPolicy.hpp"
  31 #include "gc/shared/generation.hpp"
  32 
  33 class StrongRootsScope;
  34 class SubTasksDone;
  35 class WorkGang;
  36 
  37 // A "GenCollectedHeap" is a CollectedHeap that uses generational
  38 // collection.  It has two generations, young and old.
  39 class GenCollectedHeap : public CollectedHeap {
  40   friend class GenCollectorPolicy;
  41   friend class Generation;
  42   friend class DefNewGeneration;
  43   friend class TenuredGeneration;
  44   friend class ConcurrentMarkSweepGeneration;
  45   friend class CMSCollector;
  46   friend class GenMarkSweep;
  47   friend class VM_GenCollectForAllocation;
  48   friend class VM_GenCollectFull;


 253   // Requires "addr" to be the start of a chunk, and returns its size.
 254   // "addr + size" is required to be the start of a new chunk, or the end
 255   // of the active area of the heap. Assumes (and verifies in non-product
 256   // builds) that addr is in the allocated part of the heap and is
 257   // the start of a chunk.
 258   virtual size_t block_size(const HeapWord* addr) const;
 259 
 260   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 261   // the block is an object. Assumes (and verifies in non-product
 262   // builds) that addr is in the allocated part of the heap and is
 263   // the start of a block.
 264   virtual bool block_is_obj(const HeapWord* addr) const;
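
Taken together, block_size() and block_is_obj() form a parsing contract that
lets a client walk an active region of the heap one chunk at a time. A
minimal sketch of such a walk, assuming hypothetical bounds region_start and
region_end for some active area and a GenCollectedHeap* named heap:

  HeapWord* cur = region_start;
  while (cur < region_end) {
    size_t sz = heap->block_size(cur);   // cur must be the start of a chunk
    if (heap->block_is_obj(cur)) {
      // cur is the start of a live object spanning sz words; visit it here
    }
    cur += sz;  // by the contract, this is the next chunk or region_end
  }
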
 265 
 266   // Section on TLABs.
 267   virtual bool supports_tlab_allocation() const;
 268   virtual size_t tlab_capacity(Thread* thr) const;
 269   virtual size_t tlab_used(Thread* thr) const;
 270   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 271   virtual HeapWord* allocate_new_tlab(size_t size);
 272 
 273   // Can a compiler initialize a new object without store barriers?
 274   // This permission only extends from the creation of a new object
 275   // via a TLAB up to the first subsequent safepoint.
 276   virtual bool can_elide_tlab_store_barriers() const {
 277     return true;
 278   }
 279 
 280   virtual bool card_mark_must_follow_store() const {
 281     return UseConcMarkSweepGC;
 282   }
 283 
 284   // We don't need barriers for stores to objects in the
 285   // young gen and, a fortiori, for initializing stores to
 286   // objects therein. This applies to DefNew+Tenured and ParNew+CMS
 287   // only and may need to be re-examined in case other
 288   // kinds of collectors are implemented in the future.
 289   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
 290     return is_in_young(new_obj);
 291   }
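
Read as a group, these predicates let a compiler decide per allocation site
whether initializing stores need card marks. A minimal sketch of that
decision, assuming a hypothetical helper (illustrative, not HotSpot's actual
JIT logic); note the answer is only valid from the TLAB allocation of
new_obj up to the next safepoint:

  bool can_skip_initializing_card_mark(CollectedHeap* heap, oop new_obj) {
    return heap->can_elide_tlab_store_barriers() &&
           heap->can_elide_initializing_store_barrier(new_obj);
  }
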
 292 
 293   // The "requestor" generation is performing some garbage collection
 294   // action for which it would be useful to have scratch space.  The
 295   // requestor promises to allocate no more than "max_alloc_words" in any
 296   // older generation (via promotion, say). Any blocks of space that can
 297   // be provided are returned as a list of ScratchBlocks, sorted by
 298   // decreasing size.
 299   ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
 300   // Allow each generation to reset any scratch space that it has
 301   // contributed as it needs.
 302   void release_scratch();
 303 
 304   // Ensure parsability: override
 305   virtual void ensure_parsability(bool retire_tlabs);
 306 
 307   // The longest time (in ms) that has elapsed since a collector last
 308   // ran in any generation.
 309   virtual jlong millis_since_last_gc();
 310 
 311   // Total number of full collections completed.
 312   unsigned int total_full_collections_completed() {


 498   // This is the low level interface used by the public versions of
 499   // collect() and collect_locked(). Caller holds the Heap_lock on entry.
 500   void collect_locked(GCCause::Cause cause, GenerationType max_generation);
 501 
 502   // Returns success or failure.
 503   bool create_cms_collector();
 504 
 505   // In support of ExplicitGCInvokesConcurrent functionality
 506   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 507   void collect_mostly_concurrent(GCCause::Cause cause);
 508 
 509   // Save the tops of the spaces in all generations
 510   void record_gen_tops_before_GC() PRODUCT_RETURN;
 511 
 512 protected:
 513   void gc_prologue(bool full);
 514   void gc_epilogue(bool full);
 515 
 516 public:
 517   void stop();
 518 };
 519 
 520 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP


   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
  27 
  28 #include "gc/shared/adaptiveSizePolicy.hpp"
  29 #include "gc/shared/cardTableModRefBS.hpp"
  30 #include "gc/shared/collectedHeap.hpp"
  31 #include "gc/shared/collectorPolicy.hpp"
  32 #include "gc/shared/generation.hpp"
  33 
  34 class StrongRootsScope;
  35 class SubTasksDone;
  36 class WorkGang;
  37 
  38 // A "GenCollectedHeap" is a CollectedHeap that uses generational
  39 // collection.  It has two generations, young and old.
  40 class GenCollectedHeap : public CollectedHeap {
  41   friend class GenCollectorPolicy;
  42   friend class Generation;
  43   friend class DefNewGeneration;
  44   friend class TenuredGeneration;
  45   friend class ConcurrentMarkSweepGeneration;
  46   friend class CMSCollector;
  47   friend class GenMarkSweep;
  48   friend class VM_GenCollectForAllocation;
  49   friend class VM_GenCollectFull;


 254   // Requires "addr" to be the start of a chunk, and returns its size.
 255   // "addr + size" is required to be the start of a new chunk, or the end
 256   // of the active area of the heap. Assumes (and verifies in non-product
 257   // builds) that addr is in the allocated part of the heap and is
 258   // the start of a chunk.
 259   virtual size_t block_size(const HeapWord* addr) const;
 260 
 261   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 262   // the block is an object. Assumes (and verifies in non-product
 263   // builds) that addr is in the allocated part of the heap and is
 264   // the start of a block.
 265   virtual bool block_is_obj(const HeapWord* addr) const;
 266 
 267   // Section on TLABs.
 268   virtual bool supports_tlab_allocation() const;
 269   virtual size_t tlab_capacity(Thread* thr) const;
 270   virtual size_t tlab_used(Thread* thr) const;
 271   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 272   virtual HeapWord* allocate_new_tlab(size_t size);
 273 
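
These TLAB hooks are the slow path behind per-thread bump-pointer
allocation: when a thread exhausts its current buffer, it asks the heap for
a fresh one. A minimal sketch of a refill, with refill_tlab a hypothetical
caller-side helper rather than a HotSpot function:

  HeapWord* refill_tlab(GenCollectedHeap* heap, Thread* thr, size_t word_size) {
    if (!heap->supports_tlab_allocation()) return NULL;
    if (word_size > heap->unsafe_max_tlab_alloc(thr)) {
      return NULL;  // too large for a TLAB right now; use shared allocation
    }
    return heap->allocate_new_tlab(word_size);  // NULL if the refill fails
  }
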
 274   // The "requestor" generation is performing some garbage collection
 275   // action for which it would be useful to have scratch space.  The
 276   // requestor promises to allocate no more than "max_alloc_words" in any
 277   // older generation (via promotion, say). Any blocks of space that can
 278   // be provided are returned as a list of ScratchBlocks, sorted by
 279   // decreasing size.
 280   ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
 281   // Allow each generation to reset any scratch space that it has
 282   // contributed, as needed.
 283   void release_scratch();
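
The scratch-space protocol is a borrow/release pair: a requestor generation
gathers blocks for the duration of one collection action, and the heap then
tells every generation to reclaim whatever it contributed. A minimal sketch
from the requestor's side, assuming ScratchBlock carries a next link (as the
"list of ScratchBlocks" above implies):

  ScratchBlock* blocks = gch->gather_scratch(this, max_alloc_words);
  for (ScratchBlock* b = blocks; b != NULL; b = b->next) {
    // use b as temporary space; blocks arrive sorted by decreasing size
  }
  gch->release_scratch();  // each generation resets what it contributed
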
 284 
 285   // Ensure parsability: override
 286   virtual void ensure_parsability(bool retire_tlabs);
 287 
 288   // The longest time (in ms) that has elapsed since a collector last
 289   // ran in any generation.
 290   virtual jlong millis_since_last_gc();
 291 
 292   // Total number of full collections completed.
 293   unsigned int total_full_collections_completed() {


 479   // This is the low level interface used by the public versions of
 480   // collect() and collect_locked(). Caller holds the Heap_lock on entry.
 481   void collect_locked(GCCause::Cause cause, GenerationType max_generation);
 482 
 483   // Returns success or failure.
 484   bool create_cms_collector();
 485 
 486   // In support of ExplicitGCInvokesConcurrent functionality
 487   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 488   void collect_mostly_concurrent(GCCause::Cause cause);
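
These two hooks split an explicit-GC request into two paths: causes that
qualify under ExplicitGCInvokesConcurrent are diverted to a mostly
concurrent cycle; everything else falls through to a stop-the-world
collection under Heap_lock. A sketch of that dispatch as a hypothetical
member (illustrative, not the body of the real collect()):

  void GenCollectedHeap::collect_example(GCCause::Cause cause) {
    if (should_do_concurrent_full_gc(cause)) {
      collect_mostly_concurrent(cause);  // CMS runs most of the cycle concurrently
    } else {
      MutexLocker ml(Heap_lock);         // collect_locked() expects the lock held
      collect_locked(cause, OldGen);     // collect up to and including the old gen
    }
  }
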
 489 
 490   // Save the tops of the spaces in all generations
 491   void record_gen_tops_before_GC() PRODUCT_RETURN;
 492 
 493 protected:
 494   void gc_prologue(bool full);
 495   void gc_epilogue(bool full);
 496 
 497 public:
 498   void stop();
 499   void safepoint_synchronize_begin();
 500   void safepoint_synchronize_end();
 501 
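
The two safepoint hooks appear to bracket a safepoint so that concurrent
collector threads can be paused before mutator threads are stopped, and
resumed afterwards; a rough sketch of the intended calling order (the
surrounding safepoint machinery here is assumed, not quoted):

  gch->safepoint_synchronize_begin();  // quiesce concurrent GC work first
  // ... bring Java threads to the safepoint and run the VM operation ...
  gch->safepoint_synchronize_end();    // let concurrent GC work resume
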
 502   CardTableModRefBS* barrier_set() {
 503     return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
 504   }
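
The covariant override above narrows the static type of the heap's barrier
set, so card-table-specific operations need no cast at each use site. For
example, assuming a GenCollectedHeap* named gch and a word range
[bottom, end) whose cards should be re-scanned:

  CardTableModRefBS* ct = gch->barrier_set();
  ct->invalidate(MemRegion(bottom, end));  // dirty the cards covering the range
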
 505 };
 506 
 507 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP