src/share/vm/gc/shared/collectedHeap.hpp
 421   // barrier. Returns "true" if it doesn't need an initializing
 422   // store barrier; answers "false" if it does.
 423   virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
 424 
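
A minimal sketch of how a generational heap might answer this, assuming that objects allocated in the young generation need no remembered-set entry because the young generation is scanned in full at the next collection (the class and method body below are illustrative, not the actual implementation):

    bool MyGenHeap::can_elide_initializing_store_barrier(oop new_obj) {
      // Young-generation objects are always scanned, so no card mark is
      // needed for the initializing stores into new_obj.
      return is_in_young(new_obj);
    }
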
 425   // If a compiler is eliding store barriers for TLAB-allocated objects,
 426   // we will be informed of a slow-path allocation by a call
 427   // to new_store_pre_barrier() above. Such a call precedes the
 428   // initialization of the object itself, and no post-store-barriers will
 429   // be issued. Some heap types require that the barrier strictly follows
 430   // the initializing stores. (This is currently implemented by deferring the
 431   // barrier until the next slow-path allocation or gc-related safepoint.)
 432   // This interface answers whether a particular heap type needs the card
 433   // mark to be thus strictly sequenced after the stores.
 434   virtual bool card_mark_must_follow_store() const = 0;
 435 
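
The answer typically depends on whether anything can examine the card table while the mutator is still running. A hedged sketch of the two cases (class names illustrative):

    // Stop-the-world collector: cards are only read at a safepoint, so the
    // relative order of the card mark and the initializing stores is moot.
    bool MySTWHeap::card_mark_must_follow_store() const { return false; }

    // Concurrent collector: a card-scanning thread could otherwise observe
    // a dirty card covering a not-yet-initialized object.
    bool MyConcurrentHeap::card_mark_must_follow_store() const { return true; }
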
 436   // If the CollectedHeap was asked to defer a store barrier above,
 437   // this informs it to flush such a deferred store barrier to the
 438   // remembered set.
 439   virtual void flush_deferred_store_barrier(JavaThread* thread);
 440 
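
As a sketch, flushing amounts to dirtying the cards for the region the thread recorded earlier and then clearing that record; the exact bookkeeping below (a per-thread deferred MemRegion and a write_region call on the barrier set) is an assumption for illustration:

    void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
      MemRegion deferred = thread->deferred_card_mark();  // assumed accessor
      if (!deferred.is_empty()) {
        barrier_set()->write_region(deferred);            // issue the card mark(s) now
        thread->set_deferred_card_mark(MemRegion());      // nothing left deferred
      }
    }
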
 441   // Should return true if the reference pending list lock is
 442   // acquired from non-Java threads, such as a concurrent GC thread.
 443   virtual bool needs_reference_pending_list_locker_thread() const {
 444     return false;
 445   }
 446 
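
For example, a collector whose concurrent (non-Java) GC threads append to the reference pending list would override this so that the VM starts the dedicated locker thread (sketch; the class name is illustrative):

    bool MyConcurrentHeap::needs_reference_pending_list_locker_thread() const {
      return true;  // pending-list lock may be taken from a concurrent GC thread
    }
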
 447   // Perform a collection of the heap; intended for use in implementing
 448   // "System.gc".  This probably implies as full a collection as the
 449   // "CollectedHeap" supports.
 450   virtual void collect(GCCause::Cause cause) = 0;
 451 
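
For reference, an explicit System.gc() reaches the heap through this method with the corresponding cause value; GCCause::_java_lang_system_gc is the real cause constant, while the call site shown is schematic:

    Universe::heap()->collect(GCCause::_java_lang_system_gc);
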
 452   // Perform a full collection
 453   virtual void do_full_collection(bool clear_all_soft_refs) = 0;
 454 
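
A schematic call, e.g. for a last-ditch collection that should also clear SoftReferences before giving up on an allocation:

    Universe::heap()->do_full_collection(true /* clear_all_soft_refs */);
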
 455   // This interface assumes that it's being called by the
 456   // vm thread. It collects the heap assuming that the
 457   // heap lock is already held and that we are executing in
 458   // the context of the vm thread.
 459   virtual void collect_as_vm_thread(GCCause::Cause cause);
 460 
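
A sketch of the intended calling convention: the request is packaged as a VM operation whose doit() runs in the VM thread at a safepoint, with the Heap_lock already taken by the requesting code (VM_MyCollect is a hypothetical operation, not one defined in HotSpot):

    void VM_MyCollect::doit() {
      assert(Thread::current()->is_VM_thread(), "should run in the VM thread");
      Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
    }
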
 461   // Returns the barrier set for this heap
 462   BarrierSet* barrier_set() { return _barrier_set; }
 463   void set_barrier_set(BarrierSet* barrier_set);
 464 
 465   // Returns "true" iff there is a stop-world GC in progress.  (I assume
 466   // that it should answer "false" for the concurrent part of a concurrent