
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 7318 : imported patch foreground
rev 7319 : [mq]: foreground-review-stefank
rev 7320 : [mq]: foreground-review-kim
rev 7321 : [mq]: remove-RotateCMSCollectionTypes


 175 
 176 // This struct contains per-thread things necessary to support parallel
 177 // young-gen collection.
 178 class CMSParGCThreadState: public CHeapObj<mtGC> {
 179  public:
 180   CFLS_LAB lab;
 181   PromotionInfo promo;
 182 
 183   // Constructor.
 184   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 185     promo.setSpace(cfls);
 186   }
 187 };
 188 
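The class above gives each parallel young-generation GC worker its own CFLS_LAB (a per-thread allocation buffer over the CMS free-list space) and PromotionInfo, so workers promoting objects into this generation do not contend on the shared free lists for every object. A minimal standalone sketch of the pattern; WorkerState and its members are illustrative stand-ins, not names from this file:

#include <cstddef>
#include <vector>

// Hypothetical stand-in for CFLS_LAB + PromotionInfo: each GC worker
// owns a private bump-pointer buffer carved out of the shared space,
// so parallel promotion rarely contends on the shared free lists.
struct WorkerState {
  char*  cur;       // next free byte in this worker's buffer
  char*  end;       // end of this worker's buffer
  size_t promoted;  // bytes this worker has promoted so far

  WorkerState(char* buf, size_t size)
    : cur(buf), end(buf + size), promoted(0) {}

  // A real LAB would refill from the shared free lists on overflow
  // (under a lock) instead of failing.
  void* allocate(size_t bytes) {
    if (end - cur < (ptrdiff_t)bytes) return nullptr;
    void* p = cur;
    cur += bytes;
    promoted += bytes;
    return p;
  }
};

int main() {
  static char arena[2][1024];
  std::vector<WorkerState> states;        // one state per GC worker thread
  states.emplace_back(arena[0], sizeof arena[0]);
  states.emplace_back(arena[1], sizeof arena[1]);
  void* p = states[0].allocate(64);       // no lock needed: private buffer
  (void)p;
}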
 189 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 190      ReservedSpace rs, size_t initial_byte_size, int level,
 191      CardTableRS* ct, bool use_adaptive_freelists,
 192      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 193   CardGeneration(rs, initial_byte_size, level, ct),
 194   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 195   _debug_concurrent_cycle(true),
 196   _did_compact(false)
 197 {
 198   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 199   HeapWord* end    = (HeapWord*) _virtual_space.high();
 200 
 201   _direct_allocated_words = 0;
 202   NOT_PRODUCT(
 203     _numObjectsPromoted = 0;
 204     _numWordsPromoted = 0;
 205     _numObjectsAllocated = 0;
 206     _numWordsAllocated = 0;
 207   )
 208 
 209   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 210                                            use_adaptive_freelists,
 211                                            dictionaryChoice);
 212   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 213   if (_cmsSpace == NULL) {
 214     vm_exit_during_initialization(
 215       "CompactibleFreeListSpace allocation failure");


1228 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1229                                                    size_t size,
1230                                                    bool   tlab)
1231 {
1232   // We allow a STW collection only if a full
1233   // collection was requested.
1234   return full || should_allocate(size, tlab); // FIX ME !!!
1235   // This and promotion failure handling are connected at the
1236   // hip and should be fixed by untying them.
1237 }
1238 
1239 bool CMSCollector::shouldConcurrentCollect() {
1240   if (_full_gc_requested) {
1241     if (Verbose && PrintGCDetails) {
1242       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1244                              "gc request (or gc_locker)");
1244     }
1245     return true;
1246   }
1247 
1248   // For debugging purposes, change the type of collection.
1249   // Rotate between concurrent and stop-the-world full GCs.
1250   NOT_PRODUCT(
1251     if (RotateCMSCollectionTypes) {
1252       return _cmsGen->debug_concurrent_cycle();
1253     }
1254   )
1255 
1256   FreelistLocker x(this);
1257   // ------------------------------------------------------------------
1258   // Print out lots of information which affects the initiation of
1259   // a collection.
1260   if (PrintCMSInitiationStatistics && stats().valid()) {
1261     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1262     gclog_or_tty->stamp();
1263     gclog_or_tty->cr();
1264     stats().print_on(gclog_or_tty);
1265     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1266       stats().time_until_cms_gen_full());
1267     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1268     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1269                            _cmsGen->contiguous_available());
1270     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1271     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1272     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1273     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1274     gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1275     gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
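These statistics feed the initiation heuristic that follows this excerpt: roughly, a concurrent cycle should start if the generation is predicted to fill before a cycle of typical duration could complete, with time_until_cms_gen_full derived from the free space and the combined consumption rate (promotion_rate + cms_allocation_rate). A simplified standalone model of that comparison; the real CMSStats also pads its averages for variance and subtracts expected promotion first:

#include <cstdio>

// Simplified model of the initiation heuristic: start a concurrent
// cycle if the CMS generation is predicted to fill before a cycle of
// typical duration could finish.
bool should_start_cycle(double free_bytes,
                        double promotion_rate,       // bytes/sec promoted in
                        double cms_allocation_rate,  // bytes/sec direct alloc
                        double expected_cycle_secs) {
  double consumption_rate = promotion_rate + cms_allocation_rate;
  if (consumption_rate <= 0.0) return false;       // nothing consuming space
  double time_until_full = free_bytes / consumption_rate;
  return time_until_full < expected_cycle_secs;    // won't make it: start now
}

int main() {
  // E.g. 200 MB free, consumed at ~25 MB/s, cycles take ~10 s:
  // 8 s until full < 10 s per cycle, so a cycle should start.
  printf("%d\n", should_start_cycle(200e6, 20e6, 5e6, 10.0));  // prints 1
}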


5805 }
5806 
5807 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5808   return addr >= _cmsSpace->nearLargestChunk();
5809 }
5810 
5811 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5812   return _cmsSpace->find_chunk_at_end();
5813 }
5814 
5815 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5816                                                     bool full) {
5817   // The next lower level has been collected.  Gather any statistics
5818   // that are of interest at this point.
5819   if (!full && (current_level + 1) == level()) {
5820     // Gather statistics on the young generation collection.
5821     collector()->stats().record_gc0_end(used());
5822   }
5823 }
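The level arithmetic above relies on the standard generation numbering in a generational heap: the young generation is level 0 and this CMS generation sits one level above it, so (current_level + 1) == level() holds exactly when the generation immediately below this one has just been collected, which is the moment to sample occupancy via record_gc0_end. A tiny standalone illustration:

#include <cassert>

// Generation levels in a two-generation heap: young = 0, CMS (old) = 1.
// Stats are recorded only when the generation one level below the CMS
// generation (i.e. the young gen) was collected, and not on full GCs.
const int kYoungLevel = 0;
const int kCmsLevel   = 1;

bool cms_should_record(int collected_level, bool full_collection) {
  return !full_collection && (collected_level + 1) == kCmsLevel;
}

int main() {
  assert(cms_should_record(kYoungLevel, /*full=*/false));   // young GC: record
  assert(!cms_should_record(kYoungLevel, /*full=*/true));   // full GC: skip
  assert(!cms_should_record(kCmsLevel,   /*full=*/false));  // self: skip
}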
5824 
5825 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
5826   if (PrintGCDetails && Verbose) {
5827     if (_debug_concurrent_cycle) {
5828       gclog_or_tty->print_cr("Rotate from concurrent to STW collections");
5829     } else {
5830       gclog_or_tty->print_cr("Rotate from STW to concurrent collections");
5831     }
5832   }
5833   _debug_concurrent_cycle = !_debug_concurrent_cycle;
5834 }
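This toggle cooperates with the NOT_PRODUCT(RotateCMSCollectionTypes) check in shouldConcurrentCollect above and the call at the end of the reset phase below: while _debug_concurrent_cycle is true a concurrent cycle runs, then the flag flips so the next collection is stop-the-world, alternating the two paths in debug builds. A standalone sketch of the alternation:

#include <cstdio>

// Debug-only alternation between concurrent and stop-the-world (STW)
// collections: each completed cycle flips the flag, so successive
// collections exercise both code paths.
struct DebugRotation {
  bool concurrent_next = true;

  bool should_collect_concurrently() { return concurrent_next; }

  void on_cycle_end() {            // mirrors rotate_debug_collection_type()
    concurrent_next = !concurrent_next;
  }
};

int main() {
  DebugRotation rot;
  for (int i = 0; i < 4; i++) {
    printf("cycle %d: %s\n", i,
           rot.should_collect_concurrently() ? "concurrent" : "STW");
    rot.on_cycle_end();            // alternates concurrent, STW, ...
  }
}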
5835 
5836 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
5837   // We iterate over the space(s) underlying this generation,
5838   // checking the mark bit map to see if the bits corresponding
5839   // to specific blocks are marked or not. Blocks that are
5840   // marked are live and are not swept up. All remaining blocks
5841   // are swept up, with coalescing on-the-fly as we sweep up
5842   // contiguous free and/or garbage blocks:
5843   // We need to ensure that the sweeper synchronizes with allocators
5844   // and stop-the-world collectors. In particular, the following
5845   // locks are used:
5846   // . CMS token: if this is held, a stop the world collection cannot occur
5847   // . freelistLock: if this is held no allocation can occur from this
5848   //                 generation by another thread
5849   // . bitMapLock: if this is held, no other thread can access or update
5850   //               the mark bit map
5851 
5852   // Note that we need to hold the freelistLock if we use
5853   // block iterate below; else the iterator might go awry if
5854   // a mutator (or promotion) causes block contents to change
5855   // (for instance if the allocator divvies up a block).


5935 
5936         ConcurrentMarkSweepThread::synchronize(true);
5937         bitMapLock()->lock_without_safepoint_check();
5938         startTimer();
5939       }
5940       curAddr = chunk.end();
5941     }
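The fragment above is the tail of the concurrent reset loop: the mark bitmap is cleared one quantum at a time, and between quanta the collector stops its timer, drops the bitMapLock, synchronizes with the CMS token so safepoints and mutators can proceed, then relocks and resumes. A standalone sketch of that yield pattern with hypothetical names:

#include <cstddef>
#include <mutex>

std::mutex bitmap_lock;                 // guards the mark bitmap

// Hypothetical hooks standing in for startTimer()/stopTimer() and the
// CMS-token handshake with the VM thread.
void stop_timer() {}
void start_timer() {}
void yield_to_vm_thread() { /* give pending safepoints a chance to run */ }

// Clear [from, to) a quantum at a time, yielding between quanta so the
// world is never kept waiting for the whole bitmap to be cleared.
void clear_bitmap_concurrently(char* from, char* to, size_t quantum) {
  bitmap_lock.lock();
  for (char* cur = from; cur < to; ) {
    char* chunk_end = (to - cur > (ptrdiff_t)quantum) ? cur + quantum : to;
    // ... clear the mark bits covering [cur, chunk_end) ...
    stop_timer();
    bitmap_lock.unlock();             // let mutators / safepoints in
    yield_to_vm_thread();             // cf. ConcurrentMarkSweepThread::synchronize
    bitmap_lock.lock();               // cf. lock_without_safepoint_check
    start_timer();
    cur = chunk_end;                  // cf. curAddr = chunk.end()
  }
  bitmap_lock.unlock();
}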
5942     // A successful mostly concurrent collection has been done.
5943     // Because only the full (i.e., concurrent mode failure) collections
5944     // are being measured for gc overhead limits, clean the "near" flag
5945     // and count.
5946     size_policy()->reset_gc_overhead_limit_count();
5947     _collectorState = Idling;
5948   } else {
5949     // already have the lock
5950     assert(_collectorState == Resetting, "just checking");
5951     assert_lock_strong(bitMapLock());
5952     _markBitMap.clear_all();
5953     _collectorState = Idling;
5954   }
5955 
5956   NOT_PRODUCT(
5957     if (RotateCMSCollectionTypes) {
5958       _cmsGen->rotate_debug_collection_type();
5959     }
5960   )
5961 
5962   register_gc_end();
5963 }
5964 
5965 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5966   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
5967   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5968   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
5969   TraceCollectorStats tcs(counters());
5970 
5971   switch (op) {
5972     case CMS_op_checkpointRootsInitial: {
5973       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5974       checkpointRootsInitial();
5975       if (PrintGC) {
5976         _cmsGen->printOccupancy("initial-mark");
5977       }
5978       break;
5979     }
5980     case CMS_op_checkpointRootsFinal: {
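do_CMS_operation runs with the world stopped: each stop-the-world pause of the concurrent cycle is packaged as a VM operation (VM_CMS_Initial_Mark, VM_CMS_Final_Remark in vmCMSOperations) whose doit() the VM thread executes at a safepoint, dispatching into this switch. A standalone sketch of that shape, with illustrative names:

#include <cstdio>

// Sketch of the safepoint-operation pattern: each STW pause is an
// operation object; the VM thread runs doit() with the world stopped,
// which dispatches into the collector.
enum CmsOpType { kCheckpointRootsInitial, kCheckpointRootsFinal };

struct Collector {
  void do_cms_operation(CmsOpType op) {
    switch (op) {
      case kCheckpointRootsInitial:
        printf("initial mark: scan roots, mark directly reachable objects\n");
        break;
      case kCheckpointRootsFinal:
        printf("final remark: finish marking from dirty cards and roots\n");
        break;
    }
  }
};

struct VMOpInitialMark {          // models VM_CMS_Initial_Mark
  Collector* c;
  void doit() { c->do_cms_operation(kCheckpointRootsInitial); }
};

int main() {
  Collector c;
  VMOpInitialMark op{&c};
  op.doit();   // in HotSpot, the VM thread runs this inside a safepoint
}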




 175 
 176 // This struct contains per-thread things necessary to support parallel
 177 // young-gen collection.
 178 class CMSParGCThreadState: public CHeapObj<mtGC> {
 179  public:
 180   CFLS_LAB lab;
 181   PromotionInfo promo;
 182 
 183   // Constructor.
 184   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 185     promo.setSpace(cfls);
 186   }
 187 };
 188 
 189 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 190      ReservedSpace rs, size_t initial_byte_size, int level,
 191      CardTableRS* ct, bool use_adaptive_freelists,
 192      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 193   CardGeneration(rs, initial_byte_size, level, ct),
 194   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),

 195   _did_compact(false)
 196 {
 197   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 198   HeapWord* end    = (HeapWord*) _virtual_space.high();
 199 
 200   _direct_allocated_words = 0;
 201   NOT_PRODUCT(
 202     _numObjectsPromoted = 0;
 203     _numWordsPromoted = 0;
 204     _numObjectsAllocated = 0;
 205     _numWordsAllocated = 0;
 206   )
 207 
 208   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 209                                            use_adaptive_freelists,
 210                                            dictionaryChoice);
 211   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 212   if (_cmsSpace == NULL) {
 213     vm_exit_during_initialization(
 214       "CompactibleFreeListSpace allocation failure");


1227 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1228                                                    size_t size,
1229                                                    bool   tlab)
1230 {
1231   // We allow a STW collection only if a full
1232   // collection was requested.
1233   return full || should_allocate(size, tlab); // FIX ME !!!
1234   // This and promotion failure handling are connected at the
1235   // hip and should be fixed by untying them.
1236 }
1237 
1238 bool CMSCollector::shouldConcurrentCollect() {
1239   if (_full_gc_requested) {
1240     if (Verbose && PrintGCDetails) {
1241       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1242                              "gc request (or gc_locker)");
1243     }
1244     return true;
1245   }
1246 








1247   FreelistLocker x(this);
1248   // ------------------------------------------------------------------
1249   // Print out lots of information which affects the initiation of
1250   // a collection.
1251   if (PrintCMSInitiationStatistics && stats().valid()) {
1252     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1253     gclog_or_tty->stamp();
1254     gclog_or_tty->cr();
1255     stats().print_on(gclog_or_tty);
1256     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1257       stats().time_until_cms_gen_full());
1258     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1259     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1260                            _cmsGen->contiguous_available());
1261     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1262     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1263     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1264     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1265     gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1266     gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());


5796 }
5797 
5798 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5799   return addr >= _cmsSpace->nearLargestChunk();
5800 }
5801 
5802 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5803   return _cmsSpace->find_chunk_at_end();
5804 }
5805 
5806 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5807                                                     bool full) {
5808   // The next lower level has been collected.  Gather any statistics
5809   // that are of interest at this point.
5810   if (!full && (current_level + 1) == level()) {
5811     // Gather statistics on the young generation collection.
5812     collector()->stats().record_gc0_end(used());
5813   }
5814 }
5815 











5816 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
5817   // We iterate over the space(s) underlying this generation,
5818   // checking the mark bit map to see if the bits corresponding
5819   // to specific blocks are marked or not. Blocks that are
5820   // marked are live and are not swept up. All remaining blocks
5821   // are swept up, with coalescing on-the-fly as we sweep up
5822   // contiguous free and/or garbage blocks:
5823   // We need to ensure that the sweeper synchronizes with allocators
5824   // and stop-the-world collectors. In particular, the following
5825   // locks are used:
5826   // . CMS token: if this is held, a stop the world collection cannot occur
5827   // . freelistLock: if this is held no allocation can occur from this
5828   //                 generation by another thread
5829   // . bitMapLock: if this is held, no other thread can access or update
5830   //               the mark bit map
5831 
5832   // Note that we need to hold the freelistLock if we use
5833   // block iterate below; else the iterator might go awry if
5834   // a mutator (or promotion) causes block contents to change
5835   // (for instance if the allocator divvies up a block).


5915 
5916         ConcurrentMarkSweepThread::synchronize(true);
5917         bitMapLock()->lock_without_safepoint_check();
5918         startTimer();
5919       }
5920       curAddr = chunk.end();
5921     }
5922     // A successful mostly concurrent collection has been done.
5923     // Because only the full (i.e., concurrent mode failure) collections
5924     // are being measured for gc overhead limits, clean the "near" flag
5925     // and count.
5926     size_policy()->reset_gc_overhead_limit_count();
5927     _collectorState = Idling;
5928   } else {
5929     // already have the lock
5930     assert(_collectorState == Resetting, "just checking");
5931     assert_lock_strong(bitMapLock());
5932     _markBitMap.clear_all();
5933     _collectorState = Idling;
5934   }






5935 
5936   register_gc_end();
5937 }
5938 
5939 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5940   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
5941   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5942   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
5943   TraceCollectorStats tcs(counters());
5944 
5945   switch (op) {
5946     case CMS_op_checkpointRootsInitial: {
5947       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5948       checkpointRootsInitial();
5949       if (PrintGC) {
5950         _cmsGen->printOccupancy("initial-mark");
5951       }
5952       break;
5953     }
5954     case CMS_op_checkpointRootsFinal: {

