src/hotspot/share/gc/shared/collectedHeap.hpp

rev 52072 : 8211955: GC abstraction for LAB reserve


 292   // min_fill_size() is the smallest region that can be filled.
 293   // fill_with_objects() can fill arbitrary-sized regions of the heap using
 294   // multiple objects.  fill_with_object() is for regions known to be smaller
 295   // than the largest array of integers; it uses a single object to fill the
 296   // region and has slightly less overhead.
 297   static size_t min_fill_size() {
 298     return size_t(align_object_size(oopDesc::header_size()));
 299   }
 300 
 301   static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
 302 
 303   static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
 304   static void fill_with_object(MemRegion region, bool zap = true) {
 305     fill_with_object(region.start(), region.word_size(), zap);
 306   }
 307   static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
 308     fill_with_object(start, pointer_delta(end, start), zap);
 309   }
 310 
 311   virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);


 312 
 313   // Return the address "addr" aligned by "alignment_in_bytes" if such
 314   // an address is below "end".  Return NULL otherwise.
 315   inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
 316                                                    HeapWord* end,
 317                                                    unsigned short alignment_in_bytes);
 318 
 319   // Some heaps may offer a contiguous region for shared non-blocking
 320   // allocation, via inlined code (by exporting the address of the top and
 321   // end fields defining the extent of the contiguous allocation region.)
 322 
 323   // This function returns "true" iff the heap supports this kind of
 324   // allocation.  (Default is "no".)
 325   virtual bool supports_inline_contig_alloc() const {
 326     return false;
 327   }
 328   // These functions return the addresses of the fields that define the
 329   // boundaries of the contiguous allocation area.  (These fields should be
 330   // physically near to one another.)
 331   virtual HeapWord* volatile* top_addr() const {
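
A typical client of the filler API above is buffer retirement: the unused tail of a LAB is padded with a dead object so that heap walkers can parse over it. A minimal sketch of such a caller, where retire_buffer and its buf_top/buf_end parameters are hypothetical stand-ins, not code from this file:

    // Sketch only: pad the unused tail of a retired buffer. The region is
    // known to be smaller than the largest int array, so the single-object
    // variant suffices.
    void retire_buffer(HeapWord* buf_top, HeapWord* buf_end) {  // hypothetical
      if (pointer_delta(buf_end, buf_top) >= CollectedHeap::min_fill_size()) {
        CollectedHeap::fill_with_object(buf_top, buf_end);  // zap defaults to true
      }
    }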


 559   // GCs that use a GC worker thread pool may want to share
 560   // it for use during safepoint cleanup. This is only possible
 561   // if the GC can pause and resume concurrent work (e.g. G1
 562   // concurrent marking) for an intermittent non-GC safepoint.
 563   // If this method returns NULL, SafepointSynchronize will
 564   // perform cleanup tasks serially in the VMThread.
 565   virtual WorkGang* get_safepoint_workers() { return NULL; }
 566 
 567   // Support for object pinning. This is used by JNI Get*Critical()
 568   // and Release*Critical() family of functions. If supported, the GC
 569   // must guarantee that pinned objects never move.
 570   virtual bool supports_object_pinning() const;
 571   virtual oop pin_object(JavaThread* thread, oop obj);
 572   virtual void unpin_object(JavaThread* thread, oop obj);
 573 
 574   // Deduplicate the string, iff the GC supports string deduplication.
 575   virtual void deduplicate_string(oop str);
 576 
 577   virtual bool is_oop(oop object) const;
 578 
 579   virtual size_t obj_size(oop obj) const;
 580 
 581   // Non product verification and debugging.
 582 #ifndef PRODUCT
 583   // Support for PromotionFailureALot.  Return true if it's time to cause a
 584   // promotion failure.  The no-argument version uses
 585   // this->_promotion_failure_alot_count as the counter.
 586   bool promotion_should_fail(volatile size_t* count);
 587   bool promotion_should_fail();
 588 
 589   // Reset the PromotionFailureALot counters.  Should be called at the end of a
 590   // GC in which promotion failure occurred.
 591   void reset_promotion_should_fail(volatile size_t* count);
 592   void reset_promotion_should_fail();
 593 #endif  // #ifndef PRODUCT
 594 
 595 #ifdef ASSERT
 596   static int fired_fake_oom() {
 597     return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
 598   }
 599 #endif
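
For reference, get_safepoint_workers() is meant to be overridden by collectors that own a gang; the sketch below shows the shape of such an override (MyHeap and its _workers field are assumed for illustration):

    // Sketch only: expose the collector's gang for safepoint cleanup.
    // Returning NULL (the default above) keeps cleanup serial in the VMThread.
    WorkGang* MyHeap::get_safepoint_workers() {
      return _workers;  // assumed WorkGang* field owned by this heap
    }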
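
The pinning hooks above are driven by the JNI critical functions; a minimal caller-side sketch in plain JNI (nothing below is from this file):

    #include <jni.h>
    #include <string.h>

    // Copy a Java byte[] out through the critical API. While the critical
    // section is open, a pinning-capable GC keeps 'array' in place rather
    // than blocking GC altogether.
    static void copy_out(JNIEnv* env, jbyteArray array, jbyte* dst, size_t len) {
      void* buf = env->GetPrimitiveArrayCritical(array, NULL);
      if (buf != NULL) {
        memcpy(dst, buf, len);  // no other JNI calls are allowed in here
        env->ReleasePrimitiveArrayCritical(array, buf, JNI_ABORT);  // read-only: discard
      }
    }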


 292   // min_fill_size() is the smallest region that can be filled.
 293   // fill_with_objects() can fill arbitrary-sized regions of the heap using
 294   // multiple objects.  fill_with_object() is for regions known to be smaller
 295   // than the largest array of integers; it uses a single object to fill the
 296   // region and has slightly less overhead.
 297   static size_t min_fill_size() {
 298     return size_t(align_object_size(oopDesc::header_size()));
 299   }
 300 
 301   static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
 302 
 303   static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
 304   static void fill_with_object(MemRegion region, bool zap = true) {
 305     fill_with_object(region.start(), region.word_size(), zap);
 306   }
 307   static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
 308     fill_with_object(start, pointer_delta(end, start), zap);
 309   }
 310 
 311   virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
 312   size_t min_dummy_object_size() const;
 313   size_t tlab_alloc_reserve() const;
 314 
 315   // Return the address "addr" aligned by "alignment_in_bytes" if such
 316   // an address is below "end".  Return NULL otherwise.
 317   inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
 318                                                    HeapWord* end,
 319                                                    unsigned short alignment_in_bytes);
 320 
 321   // Some heaps may offer a contiguous region for shared non-blocking
 322   // allocation, via inlined code (by exporting the address of the top and
 323   // end fields defining the extent of the contiguous allocation region.)
 324 
 325   // This function returns "true" iff the heap supports this kind of
 326   // allocation.  (Default is "no".)
 327   virtual bool supports_inline_contig_alloc() const {
 328     return false;
 329   }
 330   // These functions return the addresses of the fields that define the
 331   // boundaries of the contiguous allocation area.  (These fields should be
 332   // physically near to one another.)
 333   virtual HeapWord* volatile* top_addr() const {
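
min_dummy_object_size() and tlab_alloc_reserve() are the LAB-reserve abstraction this change introduces (8211955). A hedged sketch of why a collector would override the former; the subclass name and the extra word are illustrative assumptions, not part of this patch:

    // Sketch only: a collector that needs one extra word per object (for
    // example a forwarding word) must leave room for a correspondingly
    // larger dummy object when a LAB is retired.
    size_t MyForwardingHeap::min_dummy_object_size() const {  // hypothetical subclass
      return oopDesc::header_size() + 1;  // header plus one extra HeapWord
    }

    // LAB sizing can then subtract the reserve up front, e.g.:
    //   size_t usable = desired_size - Universe::heap()->tlab_alloc_reserve();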


 561   // GCs that use a GC worker thread pool may want to share
 562   // it for use during safepoint cleanup. This is only possible
 563   // if the GC can pause and resume concurrent work (e.g. G1
 564   // concurrent marking) for an intermittent non-GC safepoint.
 565   // If this method returns NULL, SafepointSynchronize will
 566   // perform cleanup tasks serially in the VMThread.
 567   virtual WorkGang* get_safepoint_workers() { return NULL; }
 568 
 569   // Support for object pinning. This is used by JNI Get*Critical()
 570   // and Release*Critical() family of functions. If supported, the GC
 571   // must guarantee that pinned objects never move.
 572   virtual bool supports_object_pinning() const;
 573   virtual oop pin_object(JavaThread* thread, oop obj);
 574   virtual void unpin_object(JavaThread* thread, oop obj);
 575 
 576   // Deduplicate the string, iff the GC supports string deduplication.
 577   virtual void deduplicate_string(oop str);
 578 
 579   virtual bool is_oop(oop object) const;
 580 
 581   virtual size_t cell_size(size_t obj_size) const { return obj_size; }
 582 
 583   // Non product verification and debugging.
 584 #ifndef PRODUCT
 585   // Support for PromotionFailureALot.  Return true if it's time to cause a
 586   // promotion failure.  The no-argument version uses
 587   // this->_promotion_failure_alot_count as the counter.
 588   bool promotion_should_fail(volatile size_t* count);
 589   bool promotion_should_fail();
 590 
 591   // Reset the PromotionFailureALot counters.  Should be called at the end of a
 592   // GC in which promotion failure occurred.
 593   void reset_promotion_should_fail(volatile size_t* count);
 594   void reset_promotion_should_fail();
 595 #endif  // #ifndef PRODUCT
 596 
 597 #ifdef ASSERT
 598   static int fired_fake_oom() {
 599     return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
 600   }
 601 #endif
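
cell_size() replaces the old version's obj_size(oop): given an object's size, the collector reports the size of the heap cell the object occupies, which for some collectors presumably exceeds the object itself. A sketch of a plausible override, again with an assumed one-extra-word collector:

    // Sketch only: report the full cell so heap iteration steps over the
    // hidden word in front of each object.
    size_t MyForwardingHeap::cell_size(size_t obj_size) const {  // hypothetical subclass
      return obj_size + 1;  // in HeapWords: the object plus one extra word
    }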