
src/share/vm/gc/shared/collectedHeap.hpp

rev 13349 : imported patch deflate.patch


  32 #include "runtime/perfData.hpp"
  33 #include "runtime/safepoint.hpp"
  34 #include "utilities/events.hpp"
  35 
  36 // A "CollectedHeap" is an implementation of a Java heap for HotSpot.  This
  37 // is an abstract class: there may be many different kinds of heaps.  This
  38 // class defines the functions that a heap must implement, and contains
  39 // infrastructure common to all heaps.
  40 
  41 class AdaptiveSizePolicy;
  42 class BarrierSet;
  43 class CollectorPolicy;
  44 class GCHeapSummary;
  45 class GCTimer;
  46 class GCTracer;
  47 class MetaspaceSummary;
  48 class Thread;
  49 class ThreadClosure;
  50 class VirtualSpaceSummary;
  51 class nmethod;
  52 
  53 class GCMessage : public FormatBuffer<1024> {
  54  public:
  55   bool is_before;
  56 
  57  public:
  58   GCMessage() {}
  59 };
  60 
  61 class CollectedHeap;
  62 
  63 class GCHeapLog : public EventLogBase<GCMessage> {
  64  private:
  65   void log_heap(CollectedHeap* heap, bool before);
  66 
  67  public:
  68   GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
  69 
  70   void log_heap_before(CollectedHeap* heap) {
  71     log_heap(heap, true);


 582   // Override with specific mechanism for each specialized heap type.
 583   virtual void register_nmethod(nmethod* nm);
 584   virtual void unregister_nmethod(nmethod* nm);
 585 
 586   // The following two methods support object pinning for JNI critical
 587   // regions. They are called whenever a thread enters or leaves a JNI critical
 588   // region and requires an object not to move. Note that there is another
 589   // mechanism for GCs to implement critical regions (see gcLocker.hpp). The
 590   // default implementation does nothing.
 591   virtual void pin_object(oop o);
 592   virtual void unpin_object(oop o);
 593 
 594   void trace_heap_before_gc(const GCTracer* gc_tracer);
 595   void trace_heap_after_gc(const GCTracer* gc_tracer);
 596 
 597   // Heap verification
 598   virtual void verify(VerifyOption option) = 0;
 599 
 600   // Accumulate additional statistics from GCLABs.
 601   virtual void accumulate_statistics_all_gclabs();
 602 
 603   // Non-product verification and debugging.
 604 #ifndef PRODUCT
 605   // Support for PromotionFailureALot.  Return true if it's time to cause a
 606   // promotion failure.  The no-argument version uses
 607   // this->_promotion_failure_alot_count as the counter.
 608   inline bool promotion_should_fail(volatile size_t* count);
 609   inline bool promotion_should_fail();
 610 
 611   // Reset the PromotionFailureALot counters.  Should be called at the end of a
 612   // GC in which promotion failure occurred.
 613   inline void reset_promotion_should_fail(volatile size_t* count);
 614   inline void reset_promotion_should_fail();
 615 #endif  // #ifndef PRODUCT
 616 
 617 #ifdef ASSERT
 618   static int fired_fake_oom() {
 619     return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
 620   }
 621 #endif




  32 #include "runtime/perfData.hpp"
  33 #include "runtime/safepoint.hpp"
  34 #include "utilities/events.hpp"
  35 
  36 // A "CollectedHeap" is an implementation of a Java heap for HotSpot.  This
  37 // is an abstract class: there may be many different kinds of heaps.  This
  38 // class defines the functions that a heap must implement, and contains
  39 // infrastructure common to all heaps.
  40 
  41 class AdaptiveSizePolicy;
  42 class BarrierSet;
  43 class CollectorPolicy;
  44 class GCHeapSummary;
  45 class GCTimer;
  46 class GCTracer;
  47 class MetaspaceSummary;
  48 class Thread;
  49 class ThreadClosure;
  50 class VirtualSpaceSummary;
  51 class nmethod;
  52 class WorkGang;
  53 
  54 class GCMessage : public FormatBuffer<1024> {
  55  public:
  56   bool is_before;
  57 
  58  public:
  59   GCMessage() {}
  60 };
  61 
  62 class CollectedHeap;
  63 
  64 class GCHeapLog : public EventLogBase<GCMessage> {
  65  private:
  66   void log_heap(CollectedHeap* heap, bool before);
  67 
  68  public:
  69   GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
  70 
  71   void log_heap_before(CollectedHeap* heap) {
  72     log_heap(heap, true);


 583   // Override with specific mechanism for each specialized heap type.
 584   virtual void register_nmethod(nmethod* nm);
 585   virtual void unregister_nmethod(nmethod* nm);
 586 
 587   // The following two methods support object pinning for JNI critical
 588   // regions. They are called whenever a thread enters or leaves a JNI critical
 589   // region and requires an object not to move. Note that there is another
 590   // mechanism for GCs to implement critical regions (see gcLocker.hpp). The
 591   // default implementation does nothing.
 592   virtual void pin_object(oop o);
 593   virtual void unpin_object(oop o);
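
For context, here is a minimal sketch (not part of this patch) of how a region-based collector might implement these pinning hooks. ExampleRegionHeap, ExampleRegion, region_for() and the pin-count helpers are hypothetical names used only for illustration.

    // Hypothetical sketch: keep the region that contains a pinned object from
    // being selected for evacuation while a JNI critical section references it.
    class ExampleRegionHeap : public CollectedHeap {
    public:
      virtual void pin_object(oop o) {
        // Assumed helper: map the oop to its heap region and bump a pin count
        // that the evacuation policy checks before moving objects.
        region_for(o)->increment_pin_count();
      }
      virtual void unpin_object(oop o) {
        region_for(o)->decrement_pin_count();
      }
      // The remaining pure virtual methods of CollectedHeap are omitted here.
    private:
      ExampleRegion* region_for(oop o);  // hypothetical region lookup
    };
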
 594 
 595   void trace_heap_before_gc(const GCTracer* gc_tracer);
 596   void trace_heap_after_gc(const GCTracer* gc_tracer);
 597 
 598   // Heap verification
 599   virtual void verify(VerifyOption option) = 0;
 600 
 601   // Accumulate additional statistics from GCLABs.
 602   virtual void accumulate_statistics_all_gclabs();
 603 
 604   // Return true if the GC supports per-thread monitor deflation.
 605   // In this case, idle monitors will not get deflated when entering
 606   // a safepoint, but will instead get deflated when the GC
 607   // calls into Thread::oops_do() or Thread::possibly_parallel_oops_do().
 608   // This allows for better parallelization and cache behaviour.
 609   //
 610   // NOTE that monitor deflation requires the mark words to be intact,
 611   // which means that this can only be supported by GCs that don't stow
 612   // away the mark word in order to temporarily store a forwarding
 613   // pointer in its place.
 614   virtual bool supports_per_thread_monitor_deflation() const {
 615     return false;
 616   }
 617 
 618   // This is called by ObjectSynchronizer::deflate_idle_monitors() when
 619   // the above supports_per_thread_monitor_deflation() returns false,
 620   // or at special non-GC cleanup safepoints (even if the above returns true).
 621   // It gives the GC a chance to deflate idle monitors using its GC worker
 622   // threads, and thus to parallelize monitor deflation.
 623   // The default implementation simply deflates idle monitors single-threaded,
 624   // using the calling (VM) thread.
 625   virtual void deflate_idle_monitors_all_threads();
 626 
 627   void parallel_deflate_idle_monitors(WorkGang* workers);
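
To illustrate how these hooks might fit together, a hedged sketch follows. ExampleHeap and its _workers gang are hypothetical; only the members declared above are assumed to exist.

    // Hypothetical sketch: a collector that keeps mark words intact opts into
    // per-thread deflation and parallelizes the fallback path on its workers.
    class ExampleHeap : public CollectedHeap {
    private:
      WorkGang* _workers;  // assumed GC worker gang owned by this heap
    public:
      virtual bool supports_per_thread_monitor_deflation() const {
        // Mark words are assumed to stay intact during GC, so idle monitors
        // can be deflated from Thread::oops_do() / possibly_parallel_oops_do().
        return true;
      }
      virtual void deflate_idle_monitors_all_threads() {
        // Called at non-GC cleanup safepoints (or when the flag above is
        // false); hand the work to the worker gang instead of deflating
        // single-threaded on the VM thread.
        parallel_deflate_idle_monitors(_workers);
      }
      // Remaining pure virtual methods of CollectedHeap omitted for brevity.
    };
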
 628 
 629   // Non-product verification and debugging.
 630 #ifndef PRODUCT
 631   // Support for PromotionFailureALot.  Return true if it's time to cause a
 632   // promotion failure.  The no-argument version uses
 633   // this->_promotion_failure_alot_count as the counter.
 634   inline bool promotion_should_fail(volatile size_t* count);
 635   inline bool promotion_should_fail();
 636 
 637   // Reset the PromotionFailureALot counters.  Should be called at the end of a
 638   // GC in which promotion failure occurred.
 639   inline void reset_promotion_should_fail(volatile size_t* count);
 640   inline void reset_promotion_should_fail();
 641 #endif  // #ifndef PRODUCT
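
As a usage sketch (again not part of this patch), a young-generation copying routine in a non-product build might consult these hooks roughly as below. ExampleScavenger, _heap, _old_gen and handle_promotion_failure() are hypothetical.

    oop ExampleScavenger::copy_to_old_gen(oop obj, size_t word_size) {
    #ifndef PRODUCT
      // Periodically pretend the old generation is full so the promotion
      // failure path gets exercised (driven by the PromotionFailureALot flags).
      if (_heap->promotion_should_fail()) {
        return handle_promotion_failure(obj);  // hypothetical failure handler
      }
    #endif
      HeapWord* dest = _old_gen->allocate(word_size);  // hypothetical allocation
      if (dest == NULL) {
        return handle_promotion_failure(obj);
      }
      Copy::aligned_disjoint_words((HeapWord*)obj, dest, word_size);
      return oop(dest);
    }

At the end of a GC in which promotion actually failed, the collector would call reset_promotion_should_fail() so the counters start over.
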
 642 
 643 #ifdef ASSERT
 644   static int fired_fake_oom() {
 645     return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
 646   }
 647 #endif

