hotspot/src/share/vm/memory/genCollectedHeap.hpp

Print this page
rev 611 : Merge
   1 #ifdef USE_PRAGMA_IDENT_HDR
   2 #pragma ident "@(#)genCollectedHeap.hpp 1.106 07/07/22 22:36:34 JVM"
   3 #endif
   4 /*
   5  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 class SubTasksDone;
  29 
  30 // A "GenCollectedHeap" is a SharedHeap that uses generational
  31 // collection.  It is represented with a sequence of Generation's.
  32 class GenCollectedHeap : public SharedHeap {
  33   friend class GenCollectorPolicy;
  34   friend class Generation;
  35   friend class DefNewGeneration;
  36   friend class TenuredGeneration;
  37   friend class ConcurrentMarkSweepGeneration;
  38   friend class CMSCollector;
  39   friend class GenMarkSweep;
  40   friend class VM_GenCollectForAllocation;

  41   friend class VM_GenCollectFull;
  42   friend class VM_GenCollectFullConcurrent;
  43   friend class VM_GC_HeapInspection;
  44   friend class VM_HeapDumper;
  45   friend class HeapInspection;
  46   friend class GCCauseSetter;
  47   friend class VMStructs;
  48 public:
  49   enum SomeConstants {
  50     max_gens = 10
  51   };
  52 
  53   friend class VM_PopulateDumpSharedSpace;
  54 
  55  protected:
  56   // Fields:
  57   static GenCollectedHeap* _gch;
  58 
  59  private:
  60   int _n_gens;


 237 
 238   // Requires "addr" to be the start of a chunk, and returns its size.
 239   // "addr + size" is required to be the start of a new chunk, or the end
 240   // of the active area of the heap. Assumes (and verifies in non-product
 241   // builds) that addr is in the allocated part of the heap and is
 242   // the start of a chunk.
 243   virtual size_t block_size(const HeapWord* addr) const;
 244 
 245   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 246   // the block is an object. Assumes (and verifies in non-product
 247   // builds) that addr is in the allocated part of the heap and is
 248   // the start of a chunk.
 249   virtual bool block_is_obj(const HeapWord* addr) const;
 250 
 251   // Section on TLAB's.
 252   virtual bool supports_tlab_allocation() const;
 253   virtual size_t tlab_capacity(Thread* thr) const;
 254   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 255   virtual HeapWord* allocate_new_tlab(size_t size);
 256 















 257   // The "requestor" generation is performing some garbage collection 
 258   // action for which it would be useful to have scratch space.  The
 259   // requestor promises to allocate no more than "max_alloc_words" in any
 260   // older generation (via promotion say.)   Any blocks of space that can
 261   // be provided are returned as a list of ScratchBlocks, sorted by
 262   // decreasing size.
 263   ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);



 264 
 265   size_t large_typearray_limit();
 266 
 267   // Ensure parsability: override
 268   virtual void ensure_parsability(bool retire_tlabs);
 269 
 270   // Time in ms since the longest time a collector ran in
 271   // in any generation.
 272   virtual jlong millis_since_last_gc();
 273 
 274   // Total number of full collections completed.  Debug builds assert
       // the invariant that completions never exceed the number of full
       // collections started (_full_collections_completed <=
       // _total_full_collections).
 275   unsigned int total_full_collections_completed() {
 276     assert(_full_collections_completed <= _total_full_collections,
 277            "Can't complete more collections than were started");
 278     return _full_collections_completed;
 279   }
 280 
 281   // Update above counter, as appropriate, at the end of a stop-world GC cycle
 282   unsigned int update_full_collections_completed();
 283   // Update above counter, as appropriate, at the end of a concurrent GC cycle


 // Reset the hint that the next incremental collection will fail.
 // NOTE(review): the site that sets _incremental_collection_will_fail is
 // outside this chunk -- confirm where the flag is raised.
 438   void clear_incremental_collection_will_fail() {
 439     _incremental_collection_will_fail = false;
 440   }
 441 
 // Whether the most recent incremental collection failed; a plain bool
 // maintained by the set_/clear_ mutators below.
 442   bool last_incremental_collection_failed() const {
 443     return _last_incremental_collection_failed;
 444   }
 // Record that the last incremental collection failed.
 445   void set_last_incremental_collection_failed() {
 446     _last_incremental_collection_failed = true;
 447   }
 // Forget a previously recorded incremental-collection failure.
 448   void clear_last_incremental_collection_failed() {
 449     _last_incremental_collection_failed = false;
 450   }
 451 
 452   // Promotion of obj into gen failed.  Try to promote obj to higher non-perm
 453   // gens in ascending order; return the new location of obj if successful.
 454   // Otherwise, try expand-and-allocate for obj in each generation starting at
 455   // gen; return the new location of obj if successful.  Otherwise, return NULL.
 456   oop handle_failed_promotion(Generation* gen,
 457                               oop obj,
 458                               size_t obj_size,
 459                               oop* ref);
 460 
 461 private:
 462   // Accessor for memory state verification support.  Presumably the
       // number of header HeapWords that heap-word verification should
       // skip -- the site that sets _skip_header_HeapWords is outside this
       // chunk, so confirm there.  Compiled out of product builds via
       // NOT_PRODUCT.
 463   NOT_PRODUCT(
 464     static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
 465   )
 466 
 467   // Override
 468   void check_for_non_bad_heap_word_value(HeapWord* addr,
 469     size_t size) PRODUCT_RETURN;
 470 
 471   // For use by mark-sweep.  As implemented, mark-sweep-compact is global
 472   // in an essential way: compaction is performed across generations, by
 473   // iterating over spaces.
 474   void prepare_for_compaction();
 475 
 476   // Perform a full collection of the first max_level+1 generations.
 477   // This is the low level interface used by the public versions of
 478   // collect() and collect_locked(). Caller holds the Heap_lock on entry.
 479   void collect_locked(GCCause::Cause cause, int max_level);
 480 
 481   // Returns success or failure.
 482   bool create_cms_collector();
 483 
 484   // In support of ExplicitGCInvokesConcurrent functionality
 485   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 486   void collect_mostly_concurrent(GCCause::Cause cause);
 487 



 488 protected:
 489   virtual void gc_prologue(bool full);
 490   virtual void gc_epilogue(bool full);
 491   
 492 public:
 493   virtual void preload_and_dump(TRAPS) KERNEL_RETURN;
 494 };
   1 #ifdef USE_PRAGMA_IDENT_HDR
   2 #pragma ident "@(#)genCollectedHeap.hpp 1.106 07/07/22 22:36:34 JVM"
   3 #endif
   4 /*
   5  * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 class SubTasksDone;
  29 
  30 // A "GenCollectedHeap" is a SharedHeap that uses generational
  31 // collection.  It is represented with a sequence of Generation's.
  32 class GenCollectedHeap : public SharedHeap {
  33   friend class GenCollectorPolicy;
  34   friend class Generation;
  35   friend class DefNewGeneration;
  36   friend class TenuredGeneration;
  37   friend class ConcurrentMarkSweepGeneration;
  38   friend class CMSCollector;
  39   friend class GenMarkSweep;
  40   friend class VM_GenCollectForAllocation;
  41   friend class VM_GenCollectForPermanentAllocation;
  42   friend class VM_GenCollectFull;
  43   friend class VM_GenCollectFullConcurrent;
  44   friend class VM_GC_HeapInspection;
  45   friend class VM_HeapDumper;
  46   friend class HeapInspection;
  47   friend class GCCauseSetter;
  48   friend class VMStructs;
  49 public:
  50   enum SomeConstants {
  51     max_gens = 10
  52   };
  53 
  54   friend class VM_PopulateDumpSharedSpace;
  55 
  56  protected:
  57   // Fields:
  58   static GenCollectedHeap* _gch;
  59 
  60  private:
  61   int _n_gens;


 238 
 239   // Requires "addr" to be the start of a chunk, and returns its size.
 240   // "addr + size" is required to be the start of a new chunk, or the end
 241   // of the active area of the heap. Assumes (and verifies in non-product
 242   // builds) that addr is in the allocated part of the heap and is
 243   // the start of a chunk.
 244   virtual size_t block_size(const HeapWord* addr) const;
 245 
 246   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 247   // the block is an object. Assumes (and verifies in non-product
 248   // builds) that addr is in the allocated part of the heap and is
 249   // the start of a chunk.
 250   virtual bool block_is_obj(const HeapWord* addr) const;
 251 
 252   // Section on TLAB's.
 253   virtual bool supports_tlab_allocation() const;
 254   virtual size_t tlab_capacity(Thread* thr) const;
 255   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 256   virtual HeapWord* allocate_new_tlab(size_t size);
 257 
 258   // Can a compiler initialize a new object without store barriers?
 259   // This permission only extends from the creation of a new object
 260   // via a TLAB up to the first subsequent safepoint.  GenCollectedHeap
       // grants this permission unconditionally.
 261   virtual bool can_elide_tlab_store_barriers() const {
 262     return true;
 263   }
 264 
 265   // Can a compiler elide a store barrier when it writes
 266   // a permanent oop into the heap?  Applies when the compiler
 267   // is storing x to the heap, where x->is_perm() is true.
       // Allowed exactly when the CMS collector is not selected.
 268   virtual bool can_elide_permanent_oop_store_barriers() const {
 269     // CMS needs to see all, even intra-generational, ref updates.
       // (UseConcMarkSweepGC is the VM flag selecting the CMS collector.)
 270     return !UseConcMarkSweepGC;
 271   }
 272 
 273   // The "requestor" generation is performing some garbage collection
 274   // action for which it would be useful to have scratch space.  The
 275   // requestor promises to allocate no more than "max_alloc_words" in any
 276   // older generation (via promotion say.)   Any blocks of space that can
 277   // be provided are returned as a list of ScratchBlocks, sorted by
 278   // decreasing size.
 279   ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
 280   // Allow each generation to reset any scratch space that it has
 281   // contributed as it needs.
 282   void release_scratch();
 283 
 284   size_t large_typearray_limit();
 285 
 286   // Ensure parsability: override
 287   virtual void ensure_parsability(bool retire_tlabs);
 288 
 289   // Time in ms since the longest time a collector ran in
 290   // in any generation.
 291   virtual jlong millis_since_last_gc();
 292 
 293   // Total number of full collections completed.  Debug builds assert
       // the invariant that completions never exceed the number of full
       // collections started (_full_collections_completed <=
       // _total_full_collections).
 294   unsigned int total_full_collections_completed() {
 295     assert(_full_collections_completed <= _total_full_collections,
 296            "Can't complete more collections than were started");
 297     return _full_collections_completed;
 298   }
 299 
 300   // Update above counter, as appropriate, at the end of a stop-world GC cycle
 301   unsigned int update_full_collections_completed();
 302   // Update above counter, as appropriate, at the end of a concurrent GC cycle


 // Reset the hint that the next incremental collection will fail.
 // NOTE(review): the site that sets _incremental_collection_will_fail is
 // outside this chunk -- confirm where the flag is raised.
 457   void clear_incremental_collection_will_fail() {
 458     _incremental_collection_will_fail = false;
 459   }
 460 
 // Whether the most recent incremental collection failed; a plain bool
 // maintained by the set_/clear_ mutators below.
 461   bool last_incremental_collection_failed() const {
 462     return _last_incremental_collection_failed;
 463   }
 // Record that the last incremental collection failed.
 464   void set_last_incremental_collection_failed() {
 465     _last_incremental_collection_failed = true;
 466   }
 // Forget a previously recorded incremental-collection failure.
 467   void clear_last_incremental_collection_failed() {
 468     _last_incremental_collection_failed = false;
 469   }
 470 
 471   // Promotion of obj into gen failed.  Try to promote obj to higher non-perm
 472   // gens in ascending order; return the new location of obj if successful.
 473   // Otherwise, try expand-and-allocate for obj in each generation starting at
 474   // gen; return the new location of obj if successful.  Otherwise, return NULL.
 475   oop handle_failed_promotion(Generation* gen,
 476                               oop obj,
 477                               size_t obj_size);

 478 
 479 private:
 480   // Accessor for memory state verification support.  Presumably the
       // number of header HeapWords that heap-word verification should
       // skip -- the site that sets _skip_header_HeapWords is outside this
       // chunk, so confirm there.  Compiled out of product builds via
       // NOT_PRODUCT.
 481   NOT_PRODUCT(
 482     static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
 483   )
 484 
 485   // Override
 486   void check_for_non_bad_heap_word_value(HeapWord* addr,
 487     size_t size) PRODUCT_RETURN;
 488 
 489   // For use by mark-sweep.  As implemented, mark-sweep-compact is global
 490   // in an essential way: compaction is performed across generations, by
 491   // iterating over spaces.
 492   void prepare_for_compaction();
 493 
 494   // Perform a full collection of the first max_level+1 generations.
 495   // This is the low level interface used by the public versions of
 496   // collect() and collect_locked(). Caller holds the Heap_lock on entry.
 497   void collect_locked(GCCause::Cause cause, int max_level);
 498 
 499   // Returns success or failure.
 500   bool create_cms_collector();
 501 
 502   // In support of ExplicitGCInvokesConcurrent functionality
 503   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 504   void collect_mostly_concurrent(GCCause::Cause cause);
 505 
 506   // Save the tops of the spaces in all generations
 507   void record_gen_tops_before_GC() PRODUCT_RETURN;
 508 
 509 protected:
 510   virtual void gc_prologue(bool full);
 511   virtual void gc_epilogue(bool full);
 512   
 513 public:
 514   virtual void preload_and_dump(TRAPS) KERNEL_RETURN;
 515 };