< prev index next >

src/share/vm/gc/shared/genCollectedHeap.hpp

Print this page
rev 11747 : [mq]: per.hotspot.patch


       // NOTE(review): declaration only — presumably answers whether addr is the
       // start of an object in this heap; confirm against the .cpp definition.
 264   virtual bool block_is_obj(const HeapWord* addr) const;
 265 
 266   // Section on TLABs (thread-local allocation buffers).
 267   virtual bool supports_tlab_allocation() const;
 268   virtual size_t tlab_capacity(Thread* thr) const;
 269   virtual size_t tlab_used(Thread* thr) const;
 270   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 271   virtual HeapWord* allocate_new_tlab(size_t size);
 272 
 273   // Can a compiler initialize a new object without store barriers?
 274   // This permission only extends from the creation of a new object
 275   // via a TLAB up to the first subsequent safepoint.
 276   virtual bool can_elide_tlab_store_barriers() const {
       // Unconditionally granted for this heap.
 277     return true;
 278   }
 279 
       // True iff the CMS collector is selected (UseConcMarkSweepGC); per the
       // name, callers use this to decide whether a card mark must be ordered
       // strictly after the store it covers.
 280   virtual bool card_mark_must_follow_store() const {
 281     return UseConcMarkSweepGC;
 282   }
 283 
       // True iff CMS is in use: only then is a dedicated thread needed to
       // lock the reference pending list. NOTE(review): this query is absent
       // from the newer revision shown below — removed by this patch.
 284   virtual bool needs_reference_pending_list_locker_thread() const {
 285     return UseConcMarkSweepGC;
 286   }
 287 
 288   // We don't need barriers for stores to objects in the
 289   // young gen and, a fortiori, for initializing stores to
 290   // objects therein. This applies to DefNew+Tenured and ParNew+CMS
 291   // only and may need to be re-examined in case other
 292   // kinds of collectors are implemented in the future.
 293   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
       // Barrier is elidable exactly when the new object lives in the young gen.
 294     return is_in_young(new_obj);
 295   }
 296 
 297   // The "requestor" generation is performing some garbage collection
 298   // action for which it would be useful to have scratch space.  The
 299   // requestor promises to allocate no more than "max_alloc_words" in any
 300   // older generation (via promotion, say).  Any blocks of space that can
 301   // be provided are returned as a list of ScratchBlocks, sorted by
 302   // decreasing size.
 303   ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
 304   // Allow each generation to reset any scratch space that it has
 305   // contributed as it needs.
 306   void release_scratch();
 307 




       // NOTE(review): declaration only — presumably answers whether addr is the
       // start of an object in this heap; confirm against the .cpp definition.
 264   virtual bool block_is_obj(const HeapWord* addr) const;
 265 
 266   // Section on TLABs (thread-local allocation buffers).
 267   virtual bool supports_tlab_allocation() const;
 268   virtual size_t tlab_capacity(Thread* thr) const;
 269   virtual size_t tlab_used(Thread* thr) const;
 270   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 271   virtual HeapWord* allocate_new_tlab(size_t size);
 272 
 273   // Can a compiler initialize a new object without store barriers?
 274   // This permission only extends from the creation of a new object
 275   // via a TLAB up to the first subsequent safepoint.
 276   virtual bool can_elide_tlab_store_barriers() const {
       // Unconditionally granted for this heap.
 277     return true;
 278   }
 279 
       // True iff the CMS collector is selected (UseConcMarkSweepGC); per the
       // name, callers use this to decide whether a card mark must be ordered
       // strictly after the store it covers.
 280   virtual bool card_mark_must_follow_store() const {
 281     return UseConcMarkSweepGC;
 282   }
 283 




 284   // We don't need barriers for stores to objects in the
 285   // young gen and, a fortiori, for initializing stores to
 286   // objects therein. This applies to DefNew+Tenured and ParNew+CMS
 287   // only and may need to be re-examined in case other
 288   // kinds of collectors are implemented in the future.
 289   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
       // Barrier is elidable exactly when the new object lives in the young gen.
 290     return is_in_young(new_obj);
 291   }
 292 
 293   // The "requestor" generation is performing some garbage collection
 294   // action for which it would be useful to have scratch space.  The
 295   // requestor promises to allocate no more than "max_alloc_words" in any
 296   // older generation (via promotion, say).  Any blocks of space that can
 297   // be provided are returned as a list of ScratchBlocks, sorted by
 298   // decreasing size.
 299   ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
 300   // Allow each generation to reset any scratch space that it has
 301   // contributed as it needs.
 302   void release_scratch();
 303 


< prev index next >