180 //////////////////////////////////////////////////////////////////
181 // Concurrent Mark-Sweep Generation /////////////////////////////
182 //////////////////////////////////////////////////////////////////
183
// Non-product builds keep a file-scope pointer to the CMS space; it is
// assigned in the ConcurrentMarkSweepGeneration constructor. The name
// suggests it exists to make the space reachable from a debugger —
// NOTE(review): confirm no other code reads it.
NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
185
// This class contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CompactibleFreeListSpaceLAB lab;  // per-thread local allocation buffer over the CMS space
  PromotionInfo promo;              // per-thread promotion bookkeeping for the same space

  // Constructor. Points both the LAB and the promotion info at the
  // same CompactibleFreeListSpace.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};
198
199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
200 ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
201 CardGeneration(rs, initial_byte_size, ct),
202 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
203 _did_compact(false)
204 {
205 HeapWord* bottom = (HeapWord*) _virtual_space.low();
206 HeapWord* end = (HeapWord*) _virtual_space.high();
207
208 _direct_allocated_words = 0;
209 NOT_PRODUCT(
210 _numObjectsPromoted = 0;
211 _numWordsPromoted = 0;
212 _numObjectsAllocated = 0;
213 _numWordsAllocated = 0;
214 )
215
216 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
217 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
218 _cmsSpace->_old_gen = this;
219
220 _gc_stats = new CMSGCStats();
221
8099 assert(!res || !_mark_stack->isEmpty(),
8100 "If we took something, it should now be on our stack");
8101 return res;
8102 }
8103
8104 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8105 size_t res = _sp->block_size_no_stall(addr, _collector);
8106 if (_sp->block_is_obj(addr)) {
8107 if (_live_bit_map->isMarked(addr)) {
8108 // It can't have been dead in a previous cycle
8109 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8110 } else {
8111 _dead_bit_map->mark(addr); // mark the dead object
8112 }
8113 }
8114 // Could be 0, if the block size could not be computed without stalling.
8115 return res;
8116 }
8117
// Scoped reporter that forwards CMS phase boundaries to the memory
// manager statistics machinery. Each CMS cycle is reported as a "full"
// GC; which samples are taken depends on the phase:
//  - InitialMarking: cycle start  -> record GC begin time and pre-GC usage.
//  - FinalMarking:   mid-cycle    -> only accumulate GC time.
//  - Sweeping:       cycle end    -> record peak/post-GC usage and GC end
//                                    time, and count the collection.
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {

  switch (phase) {
    case CMSCollector::InitialMarking:
      initialize(true /* fullGC */ ,
                 cause /* cause of the GC */,
                 true /* recordGCBeginTime */,
                 true /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCusage */,
                 true /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */ );
      break;

    case CMSCollector::FinalMarking:
      initialize(true /* fullGC */ ,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCusage */,
                 true /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */ );
      break;

    case CMSCollector::Sweeping:
      initialize(true /* fullGC */ ,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 true /* recordPeakUsage */,
                 true /* recordPostGCusage */,
                 false /* recordAccumulatedGCTime */,
                 true /* recordGCEndTime */,
                 true /* countCollection */ );
      break;

    default:
      // Only the three phases above should ever be bracketed by this
      // reporter.
      ShouldNotReachHere();
  }
}
|
180 //////////////////////////////////////////////////////////////////
181 // Concurrent Mark-Sweep Generation /////////////////////////////
182 //////////////////////////////////////////////////////////////////
183
// Non-product builds keep a file-scope pointer to the CMS space; it is
// assigned in the ConcurrentMarkSweepGeneration constructor. The name
// suggests it exists to make the space reachable from a debugger —
// NOTE(review): confirm no other code reads it.
NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
185
// This class contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CompactibleFreeListSpaceLAB lab;  // per-thread local allocation buffer over the CMS space
  PromotionInfo promo;              // per-thread promotion bookkeeping for the same space

  // Constructor. Points both the LAB and the promotion info at the
  // same CompactibleFreeListSpace.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};
198
199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
200 ReservedSpace rs, size_t initial_byte_size, GCMemoryManager* mem_mgr, CardTableRS* ct) :
201 CardGeneration(rs, initial_byte_size, mem_mgr, ct),
202 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
203 _did_compact(false)
204 {
205 HeapWord* bottom = (HeapWord*) _virtual_space.low();
206 HeapWord* end = (HeapWord*) _virtual_space.high();
207
208 _direct_allocated_words = 0;
209 NOT_PRODUCT(
210 _numObjectsPromoted = 0;
211 _numWordsPromoted = 0;
212 _numObjectsAllocated = 0;
213 _numWordsAllocated = 0;
214 )
215
216 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
217 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
218 _cmsSpace->_old_gen = this;
219
220 _gc_stats = new CMSGCStats();
221
8099 assert(!res || !_mark_stack->isEmpty(),
8100 "If we took something, it should now be on our stack");
8101 return res;
8102 }
8103
8104 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8105 size_t res = _sp->block_size_no_stall(addr, _collector);
8106 if (_sp->block_is_obj(addr)) {
8107 if (_live_bit_map->isMarked(addr)) {
8108 // It can't have been dead in a previous cycle
8109 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8110 } else {
8111 _dead_bit_map->mark(addr); // mark the dead object
8112 }
8113 }
8114 // Could be 0, if the block size could not be computed without stalling.
8115 return res;
8116 }
8117
// Scoped reporter that forwards CMS phase boundaries to the memory
// manager statistics machinery, attributing the cycle to the heap's
// major (CMS) memory manager. Which samples are taken depends on the
// phase:
//  - InitialMarking: cycle start  -> record GC begin time and pre-GC usage.
//  - FinalMarking:   mid-cycle    -> only accumulate GC time.
//  - Sweeping:       cycle end    -> record peak/post-GC usage and GC end
//                                    time, and count the collection.
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
  // All three phases report against the same major-collection manager.
  GCMemoryManager* mgr = CMSHeap::heap()->major_mgr();
  switch (phase) {
    case CMSCollector::InitialMarking:
      initialize(mgr /* major */ ,
                 cause /* cause of the GC */,
                 true /* recordGCBeginTime */,
                 true /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCusage */,
                 true /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */ );
      break;

    case CMSCollector::FinalMarking:
      initialize(mgr /* major */ ,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCusage */,
                 true /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */ );
      break;

    case CMSCollector::Sweeping:
      initialize(mgr /* major */ ,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 true /* recordPeakUsage */,
                 true /* recordPostGCusage */,
                 false /* recordAccumulatedGCTime */,
                 true /* recordGCEndTime */,
                 true /* countCollection */ );
      break;

    default:
      // Only the three phases above should ever be bracketed by this
      // reporter.
      ShouldNotReachHere();
  }
}
|