src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

 675 void ConcurrentMarkSweepGeneration::print_statistics() {
 676   cmsSpace()->printFLCensus(0);
 677 }
 678 #endif
 679 
 680 size_t
 681 ConcurrentMarkSweepGeneration::contiguous_available() const {
 682   // dld proposes an improvement in precision here. If the committed
 683   // part of the space ends in a free block we should add that to
 684   // uncommitted size in the calculation below. Will make this
 685   // change later, staying with the approximation below for the
 686   // time being. -- ysr.
 687   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 688 }
 689 
 690 size_t
 691 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 692   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 693 }
 694 




 695 size_t ConcurrentMarkSweepGeneration::max_available() const {
 696   return free() + _virtual_space.uncommitted_size();
 697 }
 698 
 699 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 700   size_t available = max_available();
 701   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 702   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 703   log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
 704                            res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
 705   return res;
 706 }
 707 
 708 // At a promotion failure, dump information on the block layout in the heap
 709 // (CMS old generation).
 710 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 711   Log(gc, promotion) log;
 712   if (log.is_trace()) {
 713     LogStream ls(log.trace());
 714     cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);


1506                                          max_eden_size,
1507                                          full,
1508                                          gc_cause,
1509                                          heap->soft_ref_policy());
1510 
1511   // Reset the expansion cause, now that we just completed
1512   // a collection cycle.
1513   clear_expansion_cause();
1514   _foregroundGCIsActive = false;
1515   return;
1516 }
1517 
1518 // Resize the tenured generation
1519 // after obtaining the free list locks for the
1520 // two generations.
1521 void CMSCollector::compute_new_size() {
1522   assert_locked_or_safepoint(Heap_lock);
1523   FreelistLocker z(this);
1524   MetaspaceGC::compute_new_size();
1525   _cmsGen->compute_new_size_free_list();


1526 }
1527 
1528 // A work method used by the foreground collector to do
1529 // a mark-sweep-compact.
1530 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1531   CMSHeap* heap = CMSHeap::heap();
1532 
1533   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1534   gc_timer->register_gc_start();
1535 
1536   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1537   gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
1538 
1539   heap->pre_full_gc_dump(gc_timer);
1540 
1541   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1542 
1543   // Temporarily widen the span of the weak reference processing to
1544   // the entire heap.
1545   MemRegion new_span(CMSHeap::heap()->reserved_region());


2034   // there are any modified oops in the class. The remark phase also needs
2035   // that information. Tell the young collection to save the union of all
2036   // modified klasses.
2037   if (duringMarking) {
2038     _ct->cld_rem_set()->set_accumulate_modified_oops(true);
2039   }
2040 
2041   bool registerClosure = duringMarking;
2042 
2043   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2044 
2045   if (!full) {
2046     stats().record_gc0_begin();
2047   }
2048 }
2049 
2050 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2051 
2052   _capacity_at_prologue = capacity();
2053   _used_at_prologue = used();

2054 
2055   // We enable promotion tracking so that card-scanning can recognize
2056   // which objects have been promoted during this GC and skip them.
2057   for (uint i = 0; i < ParallelGCThreads; i++) {
2058     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2059   }
2060 
2061   // Delegate to the CMSCollector, which knows how to coordinate between
2062   // this and any other CMS generations that it is responsible for
2063   // collecting.
2064   collector()->gc_prologue(full);
2065 }
2066 
2067 // This is a "private" interface for use by this generation's CMSCollector.
2068 // Not to be called directly by any other entity (for instance,
2069 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2070 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2071   bool registerClosure, ModUnionClosure* modUnionClosure) {
2072   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2073   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,


2106     // We have already been invoked; this is a gc_epilogue delegation
2107     // from yet another CMS generation that we are responsible for, just
2108     // ignore it since all relevant work has already been done.
2109     return;
2110   }
2111   assert(haveFreelistLocks(), "must have freelist locks");
2112   assert_lock_strong(bitMapLock());
2113 
2114   _ct->cld_rem_set()->set_accumulate_modified_oops(false);
2115 
2116   _cmsGen->gc_epilogue_work(full);
2117 
2118   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2119     // in case sampling was not already enabled, enable it
2120     _start_sampling = true;
2121   }
2122   // reset _eden_chunk_array so sampling starts afresh
2123   _eden_chunk_index = 0;
2124 
2125   size_t cms_used   = _cmsGen->cmsSpace()->used();

2126 
2127   // update performance counters - this uses a special version of
2128   // update_counters() that allows the utilization to be passed as a
2129   // parameter, avoiding multiple calls to used().
2130   //
2131   _cmsGen->update_counters(cms_used);
2132 
2133   bitMapLock()->unlock();
2134   releaseFreelistLocks();
2135 
2136   if (!CleanChunkPoolAsync) {
2137     Chunk::clean_chunk_pool();
2138   }
2139 
2140   set_did_compact(false);
2141   _between_prologue_and_epilogue = false;  // ready for next cycle
2142 }
2143 
2144 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2145   collector()->gc_epilogue(full);


2799 // be approximate -- we'll do a catch up phase subsequently.]
2800 void CMSCollector::checkpointRootsInitial() {
2801   assert(_collectorState == InitialMarking, "Wrong collector state");
2802   check_correct_thread_executing();
2803   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
2804 
2805   save_heap_summary();
2806   report_heap_summary(GCWhen::BeforeGC);
2807 
2808   ReferenceProcessor* rp = ref_processor();
2809   assert(_restart_addr == NULL, "Control point invariant");
2810   {
2811     // acquire locks for subsequent manipulations
2812     MutexLocker x(bitMapLock(),
2813                   Mutex::_no_safepoint_check_flag);
2814     checkpointRootsInitialWork();
2815     // enable ("weak") refs discovery
2816     rp->enable_discovery();
2817     _collectorState = Marking;
2818   }


2819 }
2820 
2821 void CMSCollector::checkpointRootsInitialWork() {
2822   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2823   assert(_collectorState == InitialMarking, "just checking");
2824 
2825   // Already have locks.
2826   assert_lock_strong(bitMapLock());
2827   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2828 
2829   // Setup the verification and class unloading state for this
2830   // CMS collection cycle.
2831   setup_cms_unloading_and_verification_state();
2832 
2833   GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2834 
2835   // Reset all the PLAB chunk arrays if necessary.
2836   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2837     reset_survivor_plab_arrays();
2838   }


4160   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4161                 _young_gen->used() / K, _young_gen->capacity() / K);
4162   {
4163     if (CMSScavengeBeforeRemark) {
4164       CMSHeap* heap = CMSHeap::heap();
4165       // Temporarily set the flag to false; GCH->do_collection expects
4166       // it to be false and will set it to true.
4167       FlagSetting fl(heap->_is_gc_active, false);
4168 
4169       heap->do_collection(true,                      // full (i.e. force, see below)
4170                           false,                     // !clear_all_soft_refs
4171                           0,                         // size
4172                           false,                     // is_tlab
4173                           GenCollectedHeap::YoungGen // type
4174         );
4175     }
4176     FreelistLocker x(this);
4177     MutexLocker y(bitMapLock(),
4178                   Mutex::_no_safepoint_check_flag);
4179     checkpointRootsFinalWork();

4180   }
4181   verify_work_stacks_empty();
4182   verify_overflow_empty();
4183 }
4184 
4185 void CMSCollector::checkpointRootsFinalWork() {
4186   GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4187 
4188   assert(haveFreelistLocks(), "must have free list locks");
4189   assert_lock_strong(bitMapLock());
4190 
4191   ResourceMark rm;
4192   HandleMark   hm;
4193 
4194   CMSHeap* heap = CMSHeap::heap();
4195 
4196   assert(haveFreelistLocks(), "must have free list locks");
4197   assert_lock_strong(bitMapLock());
4198 
4199   // We might assume that we need not fill TLAB's when


5320 
5321   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5322   _intra_sweep_timer.reset();
5323   _intra_sweep_timer.start();
5324   {
5325     GCTraceCPUTime tcpu;
5326     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5327     // First sweep the old gen
5328     {
5329       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5330                                bitMapLock());
5331       sweepWork(_cmsGen);
5332     }
5333 
5334     // Update Universe::_heap_*_at_gc figures.
5335     // We need all the free list locks to make the abstract state
5336     // transition from Sweeping to Resetting. See detailed note
5337     // further below.
5338     {
5339       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());

5340       // Update heap occupancy information which is used as
5341       // input to soft ref clearing policy at the next gc.
5342       Universe::update_heap_info_at_gc();




5343       _collectorState = Resizing;
5344     }
5345   }
5346   verify_work_stacks_empty();
5347   verify_overflow_empty();
5348 
5349   if (should_unload_classes()) {
5350     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5351     // requires that the virtual spaces are stable and not deleted.
5352     ClassLoaderDataGraph::set_should_purge(true);
5353   }
5354 
5355   _intra_sweep_timer.stop();
5356   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5357 
5358   _inter_sweep_timer.reset();
5359   _inter_sweep_timer.start();
5360 
5361   // We need to use a monotonically non-decreasing time in ms
5362   // or we will see time-warp warnings and os::javaTimeMillis()


5411   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5412 }
5413 
5414 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5415   return addr >= _cmsSpace->nearLargestChunk();
5416 }
5417 
5418 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5419   return _cmsSpace->find_chunk_at_end();
5420 }
5421 
5422 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5423                                                     bool full) {
5424   // If the young generation has been collected, gather any statistics
5425   // that are of interest at this point.
5426   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
5427   if (!full && current_is_young) {
5428     // Gather statistics on the young generation collection.
5429     collector()->stats().record_gc0_end(used());
5430   }

5431 }
5432 
5433 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5434   // We iterate over the space(s) underlying this generation,
5435   // checking the mark bit map to see if the bits corresponding
5436   // to specific blocks are marked or not. Blocks that are
5437   // marked are live and are not swept up. All remaining blocks
5438   // are swept up, with coalescing on-the-fly as we sweep up
5439   // contiguous free and/or garbage blocks:
5440   // We need to ensure that the sweeper synchronizes with allocators
5441   // and stop-the-world collectors. In particular, the following
5442   // locks are used:
5443   // . CMS token: if this is held, a stop the world collection cannot occur
5444   // . freelistLock: if this is held no allocation can occur from this
5445   //                 generation by another thread
5446   // . bitMapLock: if this is held, no other thread can access or update
5447   //                 the mark bit map
5448 
5449   // Note that we need to hold the freelistLock if we use
5450   // block iterate below; else the iterator might go awry if




 675 void ConcurrentMarkSweepGeneration::print_statistics() {
 676   cmsSpace()->printFLCensus(0);
 677 }
 678 #endif
 679 
 680 size_t
 681 ConcurrentMarkSweepGeneration::contiguous_available() const {
 682   // dld proposes an improvement in precision here. If the committed
 683   // part of the space ends in a free block we should add that to
 684   // uncommitted size in the calculation below. Will make this
 685   // change later, staying with the approximation below for the
 686   // time being. -- ysr.
 687   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 688 }
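A rough sketch of the precision improvement the comment above attributes to dld, assuming find_chunk_at_end() (defined further down in this file) returns the free chunk that terminates the committed part of the space, or NULL when the space ends in a live block. This is illustrative only, not part of the change under review:

size_t ConcurrentMarkSweepGeneration::contiguous_available_precise() const {
  // A free block at the end of the committed region is contiguous with the
  // uncommitted tail, so the two can be counted together.
  FreeChunk* tail       = _cmsSpace->find_chunk_at_end();            // may be NULL
  size_t     tail_bytes = (tail != NULL) ? tail->size() * HeapWordSize : 0;
  return MAX2(_virtual_space.uncommitted_size() + tail_bytes,
              unsafe_max_alloc_nogc());
}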
 689 
 690 size_t
 691 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 692   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 693 }
 694 
 695 size_t ConcurrentMarkSweepGeneration::used_stable() const {
 696   return cmsSpace()->used_stable();
 697 }
 698 
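The new used_stable() is the core of this change: it gives readers that run concurrently with the sweeper (monitoring code, for instance) a snapshot of occupancy that does not flicker while the free lists are being rebuilt, and the recalculate_used_stable() calls added throughout this excerpt republish that snapshot only at points where used() is coherent. A minimal sketch of the space-side pair, assuming a simple cached field; the corresponding CompactibleFreeListSpace change itself is not shown here:

// Sketch only -- the _used_stable field and these bodies are assumed, not
// copied from the patch.
size_t CompactibleFreeListSpace::used_stable() const {
  return _used_stable;          // last value published at a safe point in the cycle
}

void CompactibleFreeListSpace::recalculate_used_stable() {
  _used_stable = used();        // refresh only where used() is known to be consistent
}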
 699 size_t ConcurrentMarkSweepGeneration::max_available() const {
 700   return free() + _virtual_space.uncommitted_size();
 701 }
 702 
 703 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 704   size_t available = max_available();
 705   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 706   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 707   log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
 708                            res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
 709   return res;
 710 }
 711 
 712 // At a promotion failure, dump information on the block layout in the heap
 713 // (CMS old generation).
 714 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 715   Log(gc, promotion) log;
 716   if (log.is_trace()) {
 717     LogStream ls(log.trace());
 718     cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);


1510                                          max_eden_size,
1511                                          full,
1512                                          gc_cause,
1513                                          heap->soft_ref_policy());
1514 
1515   // Reset the expansion cause, now that we just completed
1516   // a collection cycle.
1517   clear_expansion_cause();
1518   _foregroundGCIsActive = false;
1519   return;
1520 }
1521 
1522 // Resize the tenured generation
1523 // after obtaining the free list locks for the
1524 // two generations.
1525 void CMSCollector::compute_new_size() {
1526   assert_locked_or_safepoint(Heap_lock);
1527   FreelistLocker z(this);
1528   MetaspaceGC::compute_new_size();
1529   _cmsGen->compute_new_size_free_list();
1530   // recalculate CMS used space after CMS collection
1531   _cmsGen->cmsSpace()->recalculate_used_stable();
1532 }
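FreelistLocker above is an RAII helper that holds the collector's free list locks for the whole of compute_new_size(), which is why the added recalculate_used_stable() call can safely re-read used() at this point. A sketch of the idiom, with the lock-acquisition entry point assumed rather than copied from the header:

// Sketch of the RAII idiom; the real FreelistLocker is declared in this
// generation's header file.
class FreelistLockerSketch : public StackObj {
  CMSCollector* _collector;
 public:
  FreelistLockerSketch(CMSCollector* c) : _collector(c) {
    _collector->getFreelistLocks();      // assumed acquire entry point
  }
  ~FreelistLockerSketch() {
    _collector->releaseFreelistLocks();  // matches the release seen in gc_epilogue below
  }
};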
1533 
1534 // A work method used by the foreground collector to do
1535 // a mark-sweep-compact.
1536 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1537   CMSHeap* heap = CMSHeap::heap();
1538 
1539   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1540   gc_timer->register_gc_start();
1541 
1542   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1543   gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
1544 
1545   heap->pre_full_gc_dump(gc_timer);
1546 
1547   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1548 
1549   // Temporarily widen the span of the weak reference processing to
1550   // the entire heap.
1551   MemRegion new_span(CMSHeap::heap()->reserved_region());
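The hunk is cut off here, but the span widening announced by the comment is normally done with a save/restore wrapper around the reference processor, so that discovery covers the whole heap only for the duration of the compaction. A sketch of the idea, assuming the stock ReferenceProcessorSpanMutator helper:

{
  // Saves the processor's current span, installs new_span, and restores the
  // original span automatically when this scope ends.
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  // ... mark-sweep-compact work proceeds with heap-wide reference discovery ...
}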


2040   // there are any modified oops in the class. The remark phase also needs
2041   // that information. Tell the young collection to save the union of all
2042   // modified klasses.
2043   if (duringMarking) {
2044     _ct->cld_rem_set()->set_accumulate_modified_oops(true);
2045   }
2046 
2047   bool registerClosure = duringMarking;
2048 
2049   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2050 
2051   if (!full) {
2052     stats().record_gc0_begin();
2053   }
2054 }
2055 
2056 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2057 
2058   _capacity_at_prologue = capacity();
2059   _used_at_prologue = used();
2060   _cmsSpace->recalculate_used_stable();
2061 
2062   // We enable promotion tracking so that card-scanning can recognize
2063   // which objects have been promoted during this GC and skip them.
2064   for (uint i = 0; i < ParallelGCThreads; i++) {
2065     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2066   }
2067 
2068   // Delegate to the CMSCollector, which knows how to coordinate between
2069   // this and any other CMS generations that it is responsible for
2070   // collecting.
2071   collector()->gc_prologue(full);
2072 }
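The loop above starts per-worker promotion tracking; the matching stop belongs in this generation's epilogue path, which this excerpt does not show. A hedged sketch of the counterpart, assuming PromotionInfo exposes a stopTrackingPromotions() mirroring startTrackingPromotions():

// Sketch only -- the exact epilogue-side call is not visible in this excerpt.
for (uint i = 0; i < ParallelGCThreads; i++) {
  _par_gc_thread_states[i]->promo.stopTrackingPromotions();  // assumed mirror of start
}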
2073 
2074 // This is a "private" interface for use by this generation's CMSCollector.
2075 // Not to be called directly by any other entity (for instance,
2076 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2077 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2078   bool registerClosure, ModUnionClosure* modUnionClosure) {
2079   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2080   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,


2113     // We have already been invoked; this is a gc_epilogue delegation
2114     // from yet another CMS generation that we are responsible for, just
2115     // ignore it since all relevant work has already been done.
2116     return;
2117   }
2118   assert(haveFreelistLocks(), "must have freelist locks");
2119   assert_lock_strong(bitMapLock());
2120 
2121   _ct->cld_rem_set()->set_accumulate_modified_oops(false);
2122 
2123   _cmsGen->gc_epilogue_work(full);
2124 
2125   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2126     // in case sampling was not already enabled, enable it
2127     _start_sampling = true;
2128   }
2129   // reset _eden_chunk_array so sampling starts afresh
2130   _eden_chunk_index = 0;
2131 
2132   size_t cms_used   = _cmsGen->cmsSpace()->used();
2133   _cmsGen->cmsSpace()->recalculate_used_stable();
2134 
2135   // update performance counters - this uses a special version of
2136   // update_counters() that allows the utilization to be passed as a
2137   // parameter, avoiding multiple calls to used().
2138   //
2139   _cmsGen->update_counters(cms_used);
2140 
2141   bitMapLock()->unlock();
2142   releaseFreelistLocks();
2143 
2144   if (!CleanChunkPoolAsync) {
2145     Chunk::clean_chunk_pool();
2146   }
2147 
2148   set_did_compact(false);
2149   _between_prologue_and_epilogue = false;  // ready for next cycle
2150 }
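The comment ahead of _cmsGen->update_counters(cms_used) explains that occupancy is passed in so that used() is not re-evaluated per counter. A sketch of what such an overload typically looks like, with the counter member names assumed:

// Sketch: update the perf counters from an occupancy value computed once by
// the caller instead of re-reading used() for each counter.
void ConcurrentMarkSweepGeneration::update_counters(size_t used_bytes) {
  if (UsePerfData) {
    _space_counters->update_used(used_bytes);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}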
2151 
2152 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2153   collector()->gc_epilogue(full);


2807 // be approximate -- we'll do a catch up phase subsequently.]
2808 void CMSCollector::checkpointRootsInitial() {
2809   assert(_collectorState == InitialMarking, "Wrong collector state");
2810   check_correct_thread_executing();
2811   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
2812 
2813   save_heap_summary();
2814   report_heap_summary(GCWhen::BeforeGC);
2815 
2816   ReferenceProcessor* rp = ref_processor();
2817   assert(_restart_addr == NULL, "Control point invariant");
2818   {
2819     // acquire locks for subsequent manipulations
2820     MutexLocker x(bitMapLock(),
2821                   Mutex::_no_safepoint_check_flag);
2822     checkpointRootsInitialWork();
2823     // enable ("weak") refs discovery
2824     rp->enable_discovery();
2825     _collectorState = Marking;
2826   }
2827 
2828   _cmsGen->cmsSpace()->recalculate_used_stable();
2829 }
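For orientation, the _collectorState transitions visible in these hunks follow the usual CMS cycle; the summary below is assembled from the states named in this excerpt plus the customary remaining ones, and is not a copy of the CollectorState enum:

// InitialMarking -> Marking                      (set above)
// Marking -> Precleaning -> AbortablePreclean    (concurrent phases; sampling re-enabled in gc_epilogue)
// AbortablePreclean -> FinalMarking -> Sweeping  (final checkpoint / remark, then concurrent sweep)
// Sweeping -> Resizing                           (set under the free list locks, later in this file)
// Resizing -> Resetting -> Idling                (assumed tail of the cycle; not shown in this excerpt)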
2830 
2831 void CMSCollector::checkpointRootsInitialWork() {
2832   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2833   assert(_collectorState == InitialMarking, "just checking");
2834 
2835   // Already have locks.
2836   assert_lock_strong(bitMapLock());
2837   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2838 
2839   // Setup the verification and class unloading state for this
2840   // CMS collection cycle.
2841   setup_cms_unloading_and_verification_state();
2842 
2843   GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2844 
2845   // Reset all the PLAB chunk arrays if necessary.
2846   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2847     reset_survivor_plab_arrays();
2848   }


4170   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4171                 _young_gen->used() / K, _young_gen->capacity() / K);
4172   {
4173     if (CMSScavengeBeforeRemark) {
4174       CMSHeap* heap = CMSHeap::heap();
4175       // Temporarily set the flag to false; GCH->do_collection expects
4176       // it to be false and will set it to true.
4177       FlagSetting fl(heap->_is_gc_active, false);
4178 
4179       heap->do_collection(true,                      // full (i.e. force, see below)
4180                           false,                     // !clear_all_soft_refs
4181                           0,                         // size
4182                           false,                     // is_tlab
4183                           GenCollectedHeap::YoungGen // type
4184         );
4185     }
4186     FreelistLocker x(this);
4187     MutexLocker y(bitMapLock(),
4188                   Mutex::_no_safepoint_check_flag);
4189     checkpointRootsFinalWork();
4190     _cmsGen->cmsSpace()->recalculate_used_stable();
4191   }
4192   verify_work_stacks_empty();
4193   verify_overflow_empty();
4194 }
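FlagSetting above is the stock HotSpot RAII helper for temporarily overriding a bool (here heap->_is_gc_active) so that do_collection() sees the value it expects; the previous value comes back automatically when fl leaves scope. A sketch of the save/override/restore idiom, member names assumed:

// Sketch of the idiom behind FlagSetting.
class FlagSettingSketch {
  bool  _saved;
  bool* _flag;
 public:
  FlagSettingSketch(bool& flag, bool new_value) : _saved(flag), _flag(&flag) {
    *_flag = new_value;   // override for the enclosing scope
  }
  ~FlagSettingSketch() {
    *_flag = _saved;      // restore on scope exit
  }
};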
4195 
4196 void CMSCollector::checkpointRootsFinalWork() {
4197   GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4198 
4199   assert(haveFreelistLocks(), "must have free list locks");
4200   assert_lock_strong(bitMapLock());
4201 
4202   ResourceMark rm;
4203   HandleMark   hm;
4204 
4205   CMSHeap* heap = CMSHeap::heap();
4206 
4207   assert(haveFreelistLocks(), "must have free list locks");
4208   assert_lock_strong(bitMapLock());
4209 
4210   // We might assume that we need not fill TLAB's when


5331 
5332   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5333   _intra_sweep_timer.reset();
5334   _intra_sweep_timer.start();
5335   {
5336     GCTraceCPUTime tcpu;
5337     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5338     // First sweep the old gen
5339     {
5340       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5341                                bitMapLock());
5342       sweepWork(_cmsGen);
5343     }
5344 
5345     // Update Universe::_heap_*_at_gc figures.
5346     // We need all the free list locks to make the abstract state
5347     // transition from Sweeping to Resetting. See detailed note
5348     // further below.
5349     {
5350       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5351
5352       // Update heap occupancy information which is used as
5353       // input to soft ref clearing policy at the next gc.
5354       Universe::update_heap_info_at_gc();
5355
5356       // recalculate CMS used space after CMS collection
5357       _cmsGen->cmsSpace()->recalculate_used_stable();
5358
5359       _collectorState = Resizing;
5360     }
5361   }
5362   verify_work_stacks_empty();
5363   verify_overflow_empty();
5364 
5365   if (should_unload_classes()) {
5366     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5367     // requires that the virtual spaces are stable and not deleted.
5368     ClassLoaderDataGraph::set_should_purge(true);
5369   }
5370 
5371   _intra_sweep_timer.stop();
5372   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5373 
5374   _inter_sweep_timer.reset();
5375   _inter_sweep_timer.start();
5376 
5377   // We need to use a monotonically non-decreasing time in ms
5378   // or we will see time-warp warnings and os::javaTimeMillis()


5427   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5428 }
5429 
5430 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5431   return addr >= _cmsSpace->nearLargestChunk();
5432 }
5433 
5434 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5435   return _cmsSpace->find_chunk_at_end();
5436 }
5437 
5438 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5439                                                     bool full) {
5440   // If the young generation has been collected, gather any statistics
5441   // that are of interest at this point.
5442   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
5443   if (!full && current_is_young) {
5444     // Gather statistics on the young generation collection.
5445     collector()->stats().record_gc0_end(used());
5446   }
5447   _cmsSpace->recalculate_used_stable();
5448 }
5449 
5450 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5451   // We iterate over the space(s) underlying this generation,
5452   // checking the mark bit map to see if the bits corresponding
5453   // to specific blocks are marked or not. Blocks that are
5454   // marked are live and are not swept up. All remaining blocks
5455   // are swept up, with coalescing on-the-fly as we sweep up
5456   // contiguous free and/or garbage blocks:
5457   // We need to ensure that the sweeper synchronizes with allocators
5458   // and stop-the-world collectors. In particular, the following
5459   // locks are used:
5460   // . CMS token: if this is held, a stop the world collection cannot occur
5461   // . freelistLock: if this is held no allocation can occur from this
5462   //                 generation by another thread
5463   // . bitMapLock: if this is held, no other thread can access or update
5464   //                 the mark bit map
5465 
5466   // Note that we need to hold the freelistLock if we use
5467   // block iterate below; else the iterator might go awry if