
src/share/vm/gc/shared/collectedHeap.hpp

rev 11747 : [mq]: per.hotspot.patch

Old version:

 424   // barrier. Returns "true" if it doesn't need an initializing
 425   // store barrier; answers "false" if it does.
 426   virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
 427 
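
A minimal sketch of how an implementation might answer this, assuming a generational heap in which the whole young generation is scanned at the next minor collection (ToyHeap, ToyGenerationalHeap, and is_in_young are illustrative names, not HotSpot's real class hierarchy):

    #include <cstdint>

    typedef uintptr_t oop;  // stand-in for HotSpot's oop type

    class ToyHeap {
    public:
      virtual ~ToyHeap() {}
      // Mirrors the interface above: "true" means new_obj needs no
      // initializing store barrier.
      virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
    };

    class ToyGenerationalHeap : public ToyHeap {
      uintptr_t _young_start;
      uintptr_t _young_end;
    public:
      ToyGenerationalHeap(uintptr_t start, uintptr_t end)
        : _young_start(start), _young_end(end) {}
      bool is_in_young(oop obj) const {
        return obj >= _young_start && obj < _young_end;
      }
      virtual bool can_elide_initializing_store_barrier(oop new_obj) {
        // The young generation is scanned in full at the next minor
        // collection, so no card mark is needed for objects in it.
        return is_in_young(new_obj);
      }
    };
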
 428   // If a compiler is eliding store barriers for TLAB-allocated objects,
 429   // we will be informed of a slow-path allocation by a call
 430   // to new_store_pre_barrier() above. Such a call precedes the
 431   // initialization of the object itself, and no post-store-barriers will
 432   // be issued. Some heap types require that the barrier strictly follows
 433   // the initializing stores. (This is currently implemented by deferring the
 434   // barrier until the next slow-path allocation or gc-related safepoint.)
 435   // This interface answers whether a particular heap type needs the card
 436   // mark to be thus strictly sequenced after the stores.
 437   virtual bool card_mark_must_follow_store() const = 0;
 438 
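
A sketch of the policy split this knob expresses (hypothetical types; broadly, a heap with a concurrent card-scanning phase is the kind that must answer true, since a card mark published before the initializing stores could let the scanner observe uninitialized fields):

    class ToyHeapBase {
    public:
      virtual ~ToyHeapBase() {}
      virtual bool card_mark_must_follow_store() const = 0;
    };

    // A heap collected only at safepoints can tolerate a card mark that
    // precedes the initializing stores: nothing scans cards in between.
    class StopWorldHeap : public ToyHeapBase {
    public:
      virtual bool card_mark_must_follow_store() const { return false; }
    };

    // A heap with a concurrent card-scanning phase must not publish the
    // card mark until the object's fields are initialized.
    class ConcurrentScanHeap : public ToyHeapBase {
    public:
      virtual bool card_mark_must_follow_store() const { return true; }
    };
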
 439   // If the CollectedHeap was asked to defer a store barrier above,
 440   // this informs it to flush such a deferred store barrier to the
 441   // remembered set.
 442   virtual void flush_deferred_store_barrier(JavaThread* thread);
 443 
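
A toy model of what such a flush might do, assuming the thread records the deferred region and the card table uses 512-byte cards with 0 meaning dirty, as HotSpot's does (ToyThread, ToyCardTable, and dirty_range are illustrative, not the real API):

    #include <cstddef>
    #include <cstdint>

    const size_t card_shift = 9;  // 512-byte cards, as in HotSpot

    struct ToyCardTable {
      unsigned char* bytes;  // one byte per card; 0 == dirty
      uintptr_t      base;   // heap address covered by bytes[0]
      void dirty_range(uintptr_t from, uintptr_t to) {
        uintptr_t card_size = uintptr_t(1) << card_shift;
        uintptr_t aligned   = from & ~(card_size - 1);  // round down to card start
        for (uintptr_t a = aligned; a < to; a += card_size) {
          bytes[(a - base) >> card_shift] = 0;  // mark card dirty
        }
      }
    };

    struct ToyThread {
      uintptr_t deferred_start;  // deferred card-mark region; 0 if empty
      uintptr_t deferred_end;
    };

    // Flush the deferred store barrier: now that the object is fully
    // initialized, it is safe to publish the card marks.
    void flush_deferred_store_barrier(ToyThread* t, ToyCardTable* ct) {
      if (t->deferred_start != 0) {
        ct->dirty_range(t->deferred_start, t->deferred_end);
        t->deferred_start = t->deferred_end = 0;  // region is now empty
      }
    }
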
 444   // Should return true if the reference pending list lock is
 445   // acquired from non-Java threads, such as a concurrent GC thread.
 446   virtual bool needs_reference_pending_list_locker_thread() const {
 447     return false;
 448   }
 449 
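
This is the declaration the patch removes; compare the new version below, where it is gone. For context, an illustrative override, assuming a heap whose concurrent GC threads enqueue pending references and therefore needed the dedicated locker thread (the class name is hypothetical):

    class ToyConcurrentHeap /* : public CollectedHeap */ {
    public:
      // Pending references are enqueued from a non-Java GC thread, so
      // the reference pending list lock is taken outside Java; a
      // stop-world heap keeps the "return false" default above.
      virtual bool needs_reference_pending_list_locker_thread() const {
        return true;
      }
    };
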
 450   // Perform a collection of the heap; intended for use in implementing
 451   // "System.gc".  This probably implies as full a collection as the
 452   // "CollectedHeap" supports.
 453   virtual void collect(GCCause::Cause cause) = 0;
 454 
 455   // Perform a full collection
 456   virtual void do_full_collection(bool clear_all_soft_refs) = 0;
 457 
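
A sketch of how the two entry points above can relate, assuming a simple serial heap that collects inline (ToySerialHeap is illustrative; HotSpot's heaps actually hand such requests to the VM thread via a VM operation rather than collecting on the caller's thread):

    enum Cause { _java_lang_system_gc, _allocation_failure };

    class ToySerialHeap {
    public:
      virtual ~ToySerialHeap() {}
      // "System.gc" style entry point: derive collection parameters
      // from the cause, then run as full a collection as we support.
      virtual void collect(Cause cause) {
        // Policy choice: only clear all soft references when memory is
        // actually tight, not on an explicit System.gc request.
        bool clear_all_soft_refs = (cause == _allocation_failure);
        do_full_collection(clear_all_soft_refs);
      }
      virtual void do_full_collection(bool clear_all_soft_refs) {
        // ... mark and compact the entire heap, honoring the flag ...
        (void)clear_all_soft_refs;  // unused in this stub
      }
    };
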
 458   // This interface assumes that it's being called by the
 459   // vm thread. It collects the heap assuming that the
 460   // heap lock is already held and that we are executing in
 461   // the context of the vm thread.
 462   virtual void collect_as_vm_thread(GCCause::Cause cause);
 463 
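
The comment states a caller contract rather than behavior; a sketch of the invariants an implementation would assert on entry (ToyVM and its flags are stand-ins for HotSpot's real thread and lock queries):

    #include <cassert>

    struct ToyVM {
      bool current_is_vm_thread;     // stand-in for "am I the VM thread?"
      bool heap_lock_owned_by_self;  // stand-in for "is the heap lock held?"
    };

    void collect_as_vm_thread_checked(ToyVM* vm) {
      // Contract from the comment above: VM thread, heap lock already held.
      assert(vm->current_is_vm_thread && "must be called by the VM thread");
      assert(vm->heap_lock_owned_by_self && "heap lock must already be held");
      // ... collect without re-acquiring the lock or scheduling a VM op ...
    }
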
 464   // Returns the barrier set for this heap
 465   BarrierSet* barrier_set() { return _barrier_set; }
 466   void set_barrier_set(BarrierSet* barrier_set);
 467 
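
For context, the accessor's typical consumer pattern in miniature (ToyBarrierSet and write_ref_field are illustrative; this is the shape in which runtime code applies a heap-supplied post-barrier after a reference store):

    class ToyBarrierSet {
    public:
      virtual ~ToyBarrierSet() {}
      // Post-barrier for a reference store, e.g. a card-table mark.
      virtual void write_ref_field(void** field, void* new_val) = 0;
    };

    inline void store_with_barrier(ToyBarrierSet* bs, void** field, void* val) {
      *field = val;                     // the reference store itself
      bs->write_ref_field(field, val);  // then the heap's post-barrier
    }
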
 468   // Returns "true" iff there is a stop-world GC in progress.  (I assume
 469   // that it should answer "false" for the concurrent part of a concurrent

New version (the needs_reference_pending_list_locker_thread declaration is removed):

 424   // barrier. Returns "true" if it doesn't need an initializing
 425   // store barrier; answers "false" if it does.
 426   virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
 427 
 428   // If a compiler is eliding store barriers for TLAB-allocated objects,
 429   // we will be informed of a slow-path allocation by a call
 430   // to new_store_pre_barrier() above. Such a call precedes the
 431   // initialization of the object itself, and no post-store-barriers will
 432   // be issued. Some heap types require that the barrier strictly follows
 433   // the initializing stores. (This is currently implemented by deferring the
 434   // barrier until the next slow-path allocation or gc-related safepoint.)
 435   // This interface answers whether a particular heap type needs the card
 436   // mark to be thus strictly sequenced after the stores.
 437   virtual bool card_mark_must_follow_store() const = 0;
 438 
 439   // If the CollectedHeap was asked to defer a store barrier above,
 440   // this informs it to flush such a deferred store barrier to the
 441   // remembered set.
 442   virtual void flush_deferred_store_barrier(JavaThread* thread);
 443 
 444   // Perform a collection of the heap; intended for use in implementing
 445   // "System.gc".  This probably implies as full a collection as the
 446   // "CollectedHeap" supports.
 447   virtual void collect(GCCause::Cause cause) = 0;
 448 
 449   // Perform a full collection
 450   virtual void do_full_collection(bool clear_all_soft_refs) = 0;
 451 
 452   // This interface assumes that it's being called by the
 453   // vm thread. It collects the heap assuming that the
 454   // heap lock is already held and that we are executing in
 455   // the context of the vm thread.
 456   virtual void collect_as_vm_thread(GCCause::Cause cause);
 457 
 458   // Returns the barrier set for this heap
 459   BarrierSet* barrier_set() { return _barrier_set; }
 460   void set_barrier_set(BarrierSet* barrier_set);
 461 
 462   // Returns "true" iff there is a stop-world GC in progress.  (I assume
 463   // that it should answer "false" for the concurrent part of a concurrent