85 // ParallelScavengeHeap
86 //
87 class CollectedHeap : public CHeapObj<mtInternal> {
88 friend class VMStructs;
89 friend class JVMCIVMStructs;
90 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
91
92 private:
93 #ifdef ASSERT
94 static int _fire_out_of_memory_count;
95 #endif
96
97 GCHeapLog* _gc_heap_log;
98
99 // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
100 // or INCLUDE_JVMCI is being used
101 bool _defer_initial_card_mark;
102
103 MemRegion _reserved;
104
105 protected:
106 BarrierSet* _barrier_set;
107 bool _is_gc_active;
108
109 // Used for filler objects (static, but initialized in ctor).
110 static size_t _filler_array_max_size;
111
112 unsigned int _total_collections; // ... started
113 unsigned int _total_full_collections; // ... started
114 NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
115 NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
116
117 // Reason for current garbage collection. Should be set to
118 // a value reflecting no collection between collections.
119 GCCause::Cause _gc_cause;
120 GCCause::Cause _gc_lastcause;
121 PerfStringVariable* _perf_gc_cause;
122 PerfStringVariable* _perf_gc_lastcause;
123
124 // Constructor
595 // request may be active at a time. Phases are designated by name;
596 // the set of names and their meaning is GC-specific. Once the
597 // requested phase has been reached, the collector will attempt to
598 // avoid transitioning to a new phase until a new request is made.
599 // [Note: A collector might not be able to remain in a given phase.
600 // For example, a full collection might cancel an in-progress
601 // concurrent collection.]
602 //
603 // Returns true when the phase is reached. Returns false for an
604 // unknown phase. The default implementation returns false.
605 virtual bool request_concurrent_phase(const char* phase);
606
607 // Provides a thread pool to SafepointSynchronize to use
608 // for parallel safepoint cleanup.
609 // GCs that use a GC worker thread pool may want to share
610 // it for use during safepoint cleanup. This is only possible
611 // if the GC can pause and resume concurrent work (e.g. G1
612 // concurrent marking) for an intermittent non-GC safepoint.
613 // If this method returns NULL, SafepointSynchronize will
614 // perform cleanup tasks serially in the VMThread.
  // Default implementation: this heap exposes no shared worker pool, so
  // SafepointSynchronize falls back to performing cleanup tasks serially
  // in the VMThread. GCs that can lend their worker gang override this.
  virtual WorkGang* get_safepoint_workers() { return NULL; }
616
617 // Non product verification and debugging.
618 #ifndef PRODUCT
619 // Support for PromotionFailureALot. Return true if it's time to cause a
620 // promotion failure. The no-argument version uses
621 // this->_promotion_failure_alot_count as the counter.
622 inline bool promotion_should_fail(volatile size_t* count);
623 inline bool promotion_should_fail();
624
625 // Reset the PromotionFailureALot counters. Should be called at the end of a
626 // GC in which promotion failure occurred.
627 inline void reset_promotion_should_fail(volatile size_t* count);
628 inline void reset_promotion_should_fail();
629 #endif // #ifndef PRODUCT
630
#ifdef ASSERT
  // Debug-only predicate: nonzero (true) once the fake out-of-memory
  // injection threshold has been crossed, i.e. at least CIFireOOMAt fake-OOM
  // opportunities have been counted in _fire_out_of_memory_count.
  // The CIFireOOMAt > 1 guard treats values <= 1 as "injection disabled".
  // NOTE(review): CIFireOOMAt is presumably a develop/debug flag declared
  // elsewhere — confirm its exact semantics against the flag definition.
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
|
85 // ParallelScavengeHeap
86 //
87 class CollectedHeap : public CHeapObj<mtInternal> {
88 friend class VMStructs;
89 friend class JVMCIVMStructs;
90 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
91
92 private:
93 #ifdef ASSERT
94 static int _fire_out_of_memory_count;
95 #endif
96
97 GCHeapLog* _gc_heap_log;
98
99 // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
100 // or INCLUDE_JVMCI is being used
101 bool _defer_initial_card_mark;
102
103 MemRegion _reserved;
104
  // Worker gang handed to SafepointSynchronize (via get_safepoint_workers())
  // for parallel safepoint cleanup. NOTE(review): initialization is not
  // visible in this chunk — presumably set by the concrete heap's
  // constructor/initialize(), and NULL when no pool is shared; confirm.
  WorkGang* _safepoint_workers;
106
107 protected:
108 BarrierSet* _barrier_set;
109 bool _is_gc_active;
110
111 // Used for filler objects (static, but initialized in ctor).
112 static size_t _filler_array_max_size;
113
114 unsigned int _total_collections; // ... started
115 unsigned int _total_full_collections; // ... started
116 NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
117 NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
118
119 // Reason for current garbage collection. Should be set to
120 // a value reflecting no collection between collections.
121 GCCause::Cause _gc_cause;
122 GCCause::Cause _gc_lastcause;
123 PerfStringVariable* _perf_gc_cause;
124 PerfStringVariable* _perf_gc_lastcause;
125
126 // Constructor
597 // request may be active at a time. Phases are designated by name;
598 // the set of names and their meaning is GC-specific. Once the
599 // requested phase has been reached, the collector will attempt to
600 // avoid transitioning to a new phase until a new request is made.
601 // [Note: A collector might not be able to remain in a given phase.
602 // For example, a full collection might cancel an in-progress
603 // concurrent collection.]
604 //
605 // Returns true when the phase is reached. Returns false for an
606 // unknown phase. The default implementation returns false.
607 virtual bool request_concurrent_phase(const char* phase);
608
609 // Provides a thread pool to SafepointSynchronize to use
610 // for parallel safepoint cleanup.
611 // GCs that use a GC worker thread pool may want to share
612 // it for use during safepoint cleanup. This is only possible
613 // if the GC can pause and resume concurrent work (e.g. G1
614 // concurrent marking) for an intermittent non-GC safepoint.
615 // If this method returns NULL, SafepointSynchronize will
616 // perform cleanup tasks serially in the VMThread.
  // Returns the heap's shared safepoint-cleanup worker gang; a NULL result
  // makes SafepointSynchronize run cleanup serially in the VMThread.
  virtual WorkGang* get_safepoint_workers() { return _safepoint_workers; }
618
619 // Non product verification and debugging.
620 #ifndef PRODUCT
621 // Support for PromotionFailureALot. Return true if it's time to cause a
622 // promotion failure. The no-argument version uses
623 // this->_promotion_failure_alot_count as the counter.
624 inline bool promotion_should_fail(volatile size_t* count);
625 inline bool promotion_should_fail();
626
627 // Reset the PromotionFailureALot counters. Should be called at the end of a
628 // GC in which promotion failure occurred.
629 inline void reset_promotion_should_fail(volatile size_t* count);
630 inline void reset_promotion_should_fail();
631 #endif // #ifndef PRODUCT
632
#ifdef ASSERT
  // Debug-only predicate: nonzero (true) once the fake out-of-memory
  // injection threshold has been crossed, i.e. at least CIFireOOMAt fake-OOM
  // opportunities have been counted in _fire_out_of_memory_count.
  // The CIFireOOMAt > 1 guard treats values <= 1 as "injection disabled".
  // NOTE(review): CIFireOOMAt is presumably a develop/debug flag declared
  // elsewhere — confirm its exact semantics against the flag definition.
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
|