
src/share/vm/gc/shared/collectedHeap.hpp

rev 12906 : [mq]: gc_interface


  76 };
  77 
  78 //
  79 // CollectedHeap
  80 //   GenCollectedHeap
  81 //   G1CollectedHeap
  82 //   ParallelScavengeHeap
  83 //
  84 class CollectedHeap : public CHeapObj<mtInternal> {
  85   friend class VMStructs;
  86   friend class JVMCIVMStructs;
  87   friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  88 
  89  private:
  90 #ifdef ASSERT
  91   static int       _fire_out_of_memory_count;
  92 #endif
  93 
  94   GCHeapLog* _gc_heap_log;
  95 
  96   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
  97   // or INCLUDE_JVMCI is being used
  98   bool _defer_initial_card_mark;
  99 
 100   MemRegion _reserved;
 101 
 102  protected:
 103   BarrierSet* _barrier_set;
 104   bool _is_gc_active;
 105 
 106   // Used for filler objects (static, but initialized in ctor).
 107   static size_t _filler_array_max_size;
 108 
 109   unsigned int _total_collections;          // ... started
 110   unsigned int _total_full_collections;     // ... started
 111   NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
 112   NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
 113 
 114   // Reason for current garbage collection.  Should be set to
 115   // a value reflecting no collection between collections.
 116   GCCause::Cause _gc_cause;
 117   GCCause::Cause _gc_lastcause;
 118   PerfStringVariable* _perf_gc_cause;
 119   PerfStringVariable* _perf_gc_lastcause;


 199   }
 200 
 201   virtual Name kind() const = 0;
 202 
 203   virtual const char* name() const = 0;
 204 
 205   /**
 206    * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
 207    * and JNI_OK on success.
 208    */
 209   virtual jint initialize() = 0;
 210 
 211   // In many heaps, there will be a need to perform some initialization activities
 212   // after the Universe is fully formed, but before general heap allocation is allowed.
 213   // This is the correct place to place such initialization methods.
 214   virtual void post_initialize() = 0;
 215 
 216   // Stop any ongoing concurrent work and prepare for exit.
 217   virtual void stop() {}
 218 
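The initialize()/post_initialize()/stop() trio above defines the heap's lifecycle contract: reserve memory (reporting failure via a JNI error code), finish setup once the Universe exists, and quiesce concurrent work before exit. The following is a minimal, self-contained sketch of that ordering; ToyHeap and its bodies are hypothetical stand-ins, not HotSpot code, and only the call order and the JNI return-code convention are taken from the comments above.

#include <cstdio>

enum { JNI_OK = 0, JNI_ENOMEM = -4 };     // JNI convention assumed by initialize()

struct ToyHeap {                          // hypothetical stand-in for a CollectedHeap subclass
  bool reserved = false;
  int initialize() {                      // reserve/commit initial memory; may fail
    reserved = true;                      // pretend the reservation succeeded
    return reserved ? JNI_OK : JNI_ENOMEM;
  }
  void post_initialize() {                // runs after the Universe is fully formed
    std::puts("heap open for general allocation");
  }
  void stop() {}                          // quiesce any concurrent workers before exit
};

int main() {
  ToyHeap heap;
  if (heap.initialize() != JNI_OK) return 1;  // JNI_ENOMEM: give up early
  heap.post_initialize();
  heap.stop();                                // prepare for VM exit
  return 0;
}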




 219   void initialize_reserved_region(HeapWord *start, HeapWord *end);
 220   MemRegion reserved_region() const { return _reserved; }
 221   address base() const { return (address)reserved_region().start(); }
 222 
 223   virtual size_t capacity() const = 0;
 224   virtual size_t used() const = 0;
 225 
 226   // Return "true" if the part of the heap that allocates Java
 227   // objects has reached the maximal committed limit that it can
 228   // reach, without a garbage collection.
 229   virtual bool is_maximal_no_gc() const = 0;
 230 
 231   // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
 232   // memory that the vm could make available for storing 'normal' java objects.
 233   // This is based on the reserved address space, but should not include space
 234   // that the vm uses internally for bookkeeping or temporary storage
 235   // (e.g., in the case of the young gen, one of the survivor
 236   // spaces).
 237   virtual size_t max_capacity() const = 0;
 238 
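Taken together, used(), capacity() and max_capacity() imply an ordering: bytes in use fit in the committed space, and the committed space fits in the reserved space that backs java.lang.Runtime.maxMemory(). A small sketch of that assumed invariant follows; the figures are invented for illustration.

#include <cassert>
#include <cstddef>

int main() {
  // Hypothetical figures for one heap; only the ordering is implied by the comments above.
  size_t used         = 64  * 1024 * 1024;   // bytes currently allocated to Java objects
  size_t capacity     = 128 * 1024 * 1024;   // committed space available for Java objects
  size_t max_capacity = 512 * 1024 * 1024;   // reserved space, as reported by Runtime.maxMemory()

  assert(used <= capacity);                  // allocation never exceeds committed space
  assert(capacity <= max_capacity);          // committed space never exceeds the reservation
  return 0;
}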


 374   // The argument "retire_tlabs" controls whether existing TLABs
 375   // are merely filled or also retired, thus preventing further
 376   // allocation from them and necessitating allocation of new TLABs.
 377   virtual void ensure_parsability(bool retire_tlabs);
 378 
 379   // Section on thread-local allocation buffers (TLABs)
 380   // If the heap supports thread-local allocation buffers, it should override
 381   // the following methods:
 382   // Returns "true" iff the heap supports thread-local allocation buffers.
 383   // The default is "no".
 384   virtual bool supports_tlab_allocation() const = 0;
 385 
 386   // The amount of space available for thread-local allocation buffers.
 387   virtual size_t tlab_capacity(Thread *thr) const = 0;
 388 
 389   // The amount of used space for thread-local allocation buffers for the given thread.
 390   virtual size_t tlab_used(Thread *thr) const = 0;
 391 
 392   virtual size_t max_tlab_size() const;
 393 


 394   // An estimate of the maximum allocation that could be performed
 395   // for thread-local allocation buffers without triggering any
 396   // collection or expansion activity.
 397   virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
 398     guarantee(false, "thread-local allocation buffers not supported");
 399     return 0;
 400   }
 401 
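The TLAB queries above are what a mutator's refill path would consult before carving out a new buffer. Below is a hedged sketch of that sizing decision written against a simplified interface; the ToyHeap type, the concrete sizes, and the clamping policy are assumptions for illustration, not the VM's actual policy.

#include <algorithm>
#include <cstddef>

struct ToyHeap {                                   // simplified stand-in for the interface above
  bool   supports_tlab_allocation() const { return true; }
  size_t max_tlab_size()            const { return 256 * 1024; }  // hard per-TLAB ceiling
  size_t unsafe_max_tlab_alloc()    const { return 96 * 1024; }   // fits without GC or expansion
};

// Returns the TLAB refill size to request, or 0 to fall back to shared (slow-path) allocation.
size_t choose_refill_size(const ToyHeap& heap, size_t desired) {
  if (!heap.supports_tlab_allocation()) {
    return 0;                                      // heap has no TLAB support at all
  }
  size_t size = std::min(desired, heap.max_tlab_size());
  size = std::min(size, heap.unsafe_max_tlab_alloc());
  return size;
}

int main() {
  ToyHeap heap;
  return choose_refill_size(heap, 128 * 1024) == 96 * 1024 ? 0 : 1;
}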
 402   // Can a compiler initialize a new object without store barriers?
 403   // This permission only extends from the creation of a new object
 404   // via a TLAB up to the first subsequent safepoint. If such permission
 405   // is granted for this heap type, the compiler promises to call
 406   // defer_store_barrier() below on any slow path allocation of
 407   // a new object for which such initializing store barriers will
 408   // have been elided.
 409   virtual bool can_elide_tlab_store_barriers() const = 0;
 410 
 411   // If a compiler is eliding store barriers for TLAB-allocated objects,
 412   // there is probably a corresponding slow path which can produce
 413   // an object allocated anywhere.  The compiler's runtime support
 414   // promises to call this function on such a slow-path-allocated
 415   // object before performing initializations that have elided
 416   // store barriers. Returns new_obj, or maybe a safer copy thereof.
 417   virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
 418 
 419   // Answers whether an initializing store to a new object currently
 420   // allocated at the given address doesn't need a store
 421   // barrier. Returns "true" if it doesn't need an initializing
 422   // store barrier; answers "false" if it does.
 423   virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
 424 
 425   // If a compiler is eliding store barriers for TLAB-allocated objects,
 426   // we will be informed of a slow-path allocation by a call
 427   // to new_store_pre_barrier() above. Such a call precedes the
 428   // initialization of the object itself, and no post-store-barriers will
 429   // be issued. Some heap types require that the barrier strictly follows
 430   // the initializing stores. (This is currently implemented by deferring the
 431   // barrier until the next slow-path allocation or gc-related safepoint.)
 432   // This interface answers whether a particular heap type needs the card
 433   // mark to be thus strictly sequenced after the stores.
 434   virtual bool card_mark_must_follow_store() const = 0;
 435 
 436   // If the CollectedHeap was asked to defer a store barrier above,
 437   // this informs it to flush such a deferred store barrier to the
 438   // remembered set.
 439   virtual void flush_deferred_store_barrier(JavaThread* thread);
 440 
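The hooks above describe a protocol: when a compiled allocation takes the slow path, new_store_pre_barrier() is called before the initializing stores, the post-store card mark is deferred rather than emitted, and flush_deferred_store_barrier() later pushes it to the remembered set. The sketch below only illustrates that ordering; the DeferredMark bookkeeping is invented for the example and is not the VM's data structure.

#include <cstdio>

struct DeferredMark {                 // hypothetical per-thread bookkeeping
  void* obj = nullptr;
  bool  pending = false;
};

void new_store_pre_barrier(DeferredMark& dm, void* new_obj) {
  dm.obj = new_obj;                   // remember the slow-path-allocated object
  dm.pending = true;                  // its card mark is deferred, not skipped
}

void flush_deferred_store_barrier(DeferredMark& dm) {
  if (dm.pending) {
    std::printf("dirty card for %p\n", dm.obj);  // stand-in for the remembered-set update
    dm.pending = false;
  }
}

int main() {
  DeferredMark dm;
  int dummy = 0;
  new_store_pre_barrier(dm, &dummy);  // 1) slow-path allocation reported first
  dummy = 42;                         // 2) initializing stores, with no post barrier emitted
  flush_deferred_store_barrier(dm);   // 3) deferred card mark issued later (e.g. at a safepoint)
  return 0;
}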
 441   // Perform a collection of the heap; intended for use in implementing
 442   // "System.gc".  This probably implies as full a collection as the
 443   // "CollectedHeap" supports.
 444   virtual void collect(GCCause::Cause cause) = 0;
 445 
 446   // Perform a full collection
 447   virtual void do_full_collection(bool clear_all_soft_refs) = 0;
 448 
 449   // This interface assumes that it's being called by the
 450   // vm thread. It collects the heap assuming that the
 451   // heap lock is already held and that we are executing in
 452   // the context of the vm thread.
 453   virtual void collect_as_vm_thread(GCCause::Cause cause);
 454 
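collect_as_vm_thread() states two preconditions: the caller is the VM thread and the heap lock is already held. A brief sketch of how an implementation might assert them follows; the is_vm_thread() and heap_lock_owned() helpers are invented placeholders, not the real Thread/Mutex APIs.

#include <cassert>

// Hypothetical helpers; a real implementation would consult the current thread
// and the heap lock, which are not reproduced here.
bool is_vm_thread()    { return true; }
bool heap_lock_owned() { return true; }

void collect_as_vm_thread_sketch(int /*cause*/) {
  assert(is_vm_thread());      // must execute in the context of the VM thread
  assert(heap_lock_owned());   // heap lock is already held by the caller
  // ... perform the collection for the given cause ...
}

int main() {
  collect_as_vm_thread_sketch(0);
  return 0;
}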
 455   // Returns the barrier set for this heap
 456   BarrierSet* barrier_set() { return _barrier_set; }
 457   void set_barrier_set(BarrierSet* barrier_set);
 458 
 459   // Returns "true" iff there is a stop-world GC in progress.  (I assume




  76 };
  77 
  78 //
  79 // CollectedHeap
  80 //   GenCollectedHeap
  81 //   G1CollectedHeap
  82 //   ParallelScavengeHeap
  83 //
  84 class CollectedHeap : public CHeapObj<mtInternal> {
  85   friend class VMStructs;
  86   friend class JVMCIVMStructs;
  87   friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  88 
  89  private:
  90 #ifdef ASSERT
  91   static int       _fire_out_of_memory_count;
  92 #endif
  93 
  94   GCHeapLog* _gc_heap_log;
  95 




  96   MemRegion _reserved;
  97 
  98  protected:
  99   BarrierSet* _barrier_set;
 100   bool _is_gc_active;
 101 
 102   // Used for filler objects (static, but initialized in ctor).
 103   static size_t _filler_array_max_size;
 104 
 105   unsigned int _total_collections;          // ... started
 106   unsigned int _total_full_collections;     // ... started
 107   NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
 108   NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
 109 
 110   // Reason for current garbage collection.  Should be set to
 111   // a value reflecting no collection between collections.
 112   GCCause::Cause _gc_cause;
 113   GCCause::Cause _gc_lastcause;
 114   PerfStringVariable* _perf_gc_cause;
 115   PerfStringVariable* _perf_gc_lastcause;


 195   }
 196 
 197   virtual Name kind() const = 0;
 198 
 199   virtual const char* name() const = 0;
 200 
 201   /**
 202    * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
 203    * and JNI_OK on success.
 204    */
 205   virtual jint initialize() = 0;
 206 
 207   // In many heaps, there will be a need to perform some initialization activities
 208   // after the Universe is fully formed, but before general heap allocation is allowed.
 209   // This is the correct place to place such initialization methods.
 210   virtual void post_initialize() = 0;
 211 
 212   // Stop any ongoing concurrent work and prepare for exit.
 213   virtual void stop() {}
 214 
 215   // Stop and resume concurrent GC threads so they do not interfere with safepoint operations
 216   virtual void safepoint_synchronize_begin() {}
 217   virtual void safepoint_synchronize_end() {}
 218 
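The new safepoint_synchronize_begin()/end() pair lets a heap park its concurrent GC threads while a safepoint is in progress. The sketch below shows how a safepointing sequence might bracket the operation with these hooks; the ToyHeap driver and the surrounding sequence are assumptions for illustration, not the actual SafepointSynchronize code.

#include <cstdio>

struct ToyHeap {                                // stand-in for a CollectedHeap subclass
  void safepoint_synchronize_begin() {          // stop concurrent GC threads
    std::puts("concurrent GC threads paused");
  }
  void safepoint_synchronize_end() {            // let them run again
    std::puts("concurrent GC threads resumed");
  }
};

void run_safepoint_operation(ToyHeap& heap) {
  heap.safepoint_synchronize_begin();           // before bringing Java threads to a stop
  std::puts("safepoint operation executes");    // the VM operation itself
  heap.safepoint_synchronize_end();             // after Java threads are released
}

int main() {
  ToyHeap heap;
  run_safepoint_operation(heap);
  return 0;
}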
 219   void initialize_reserved_region(HeapWord *start, HeapWord *end);
 220   MemRegion reserved_region() const { return _reserved; }
 221   address base() const { return (address)reserved_region().start(); }
 222 
 223   virtual size_t capacity() const = 0;
 224   virtual size_t used() const = 0;
 225 
 226   // Return "true" if the part of the heap that allocates Java
 227   // objects has reached the maximal committed limit that it can
 228   // reach, without a garbage collection.
 229   virtual bool is_maximal_no_gc() const = 0;
 230 
 231   // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
 232   // memory that the vm could make available for storing 'normal' java objects.
 233   // This is based on the reserved address space, but should not include space
 234   // that the vm uses internally for bookkeeping or temporary storage
 235   // (e.g., in the case of the young gen, one of the survivor
 236   // spaces).
 237   virtual size_t max_capacity() const = 0;
 238 


 374   // The argument "retire_tlabs" controls whether existing TLABs
 375   // are merely filled or also retired, thus preventing further
 376   // allocation from them and necessitating allocation of new TLABs.
 377   virtual void ensure_parsability(bool retire_tlabs);
 378 
 379   // Section on thread-local allocation buffers (TLABs)
 380   // If the heap supports thread-local allocation buffers, it should override
 381   // the following methods:
 382   // Returns "true" iff the heap supports thread-local allocation buffers.
 383   // The default is "no".
 384   virtual bool supports_tlab_allocation() const = 0;
 385 
 386   // The amount of space available for thread-local allocation buffers.
 387   virtual size_t tlab_capacity(Thread *thr) const = 0;
 388 
 389   // The amount of used space for thread-local allocation buffers for the given thread.
 390   virtual size_t tlab_used(Thread *thr) const = 0;
 391 
 392   virtual size_t max_tlab_size() const;
 393 
 394   virtual void verify_nmethod_roots(nmethod* nmethod);
 395 
 396   // An estimate of the maximum allocation that could be performed
 397   // for thread-local allocation buffers without triggering any
 398   // collection or expansion activity.
 399   virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
 400     guarantee(false, "thread-local allocation buffers not supported");
 401     return 0;
 402   }

 403 
 404   // Perform a collection of the heap; intended for use in implementing
 405   // "System.gc".  This probably implies as full a collection as the
 406   // "CollectedHeap" supports.
 407   virtual void collect(GCCause::Cause cause) = 0;
 408 
 409   // Perform a full collection
 410   virtual void do_full_collection(bool clear_all_soft_refs) = 0;
 411 
 412   // This interface assumes that it's being called by the
 413   // vm thread. It collects the heap assuming that the
 414   // heap lock is already held and that we are executing in
 415   // the context of the vm thread.
 416   virtual void collect_as_vm_thread(GCCause::Cause cause);
 417 
 418   // Returns the barrier set for this heap
 419   BarrierSet* barrier_set() { return _barrier_set; }
 420   void set_barrier_set(BarrierSet* barrier_set);
 421 
 422   // Returns "true" iff there is a stop-world GC in progress.  (I assume

