src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

 675 void ConcurrentMarkSweepGeneration::print_statistics() {
 676   cmsSpace()->printFLCensus(0);
 677 }
 678 #endif
 679 
 680 size_t
 681 ConcurrentMarkSweepGeneration::contiguous_available() const {
 682   // dld proposes an improvement in precision here. If the committed
 683   // part of the space ends in a free block we should add that to
 684   // uncommitted size in the calculation below. Will make this
 685   // change later, staying with the approximation below for the
 686   // time being. -- ysr.
 687   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 688 }
 689 
 690 size_t
 691 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 692   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 693 }
 694
 695 size_t ConcurrentMarkSweepGeneration::max_available() const {
 696   return free() + _virtual_space.uncommitted_size();
 697 }
 698 
 699 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 700   size_t available = max_available();
 701   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 702   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 703   log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
 704                            res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
 705   return res;
 706 }
 707 
 708 // At a promotion failure dump information on block layout in heap
 709 // (cms old generation).
 710 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 711   Log(gc, promotion) log;
 712   if (log.is_trace()) {
 713     LogStream ls(log.trace());
 714     cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);


1506                                          max_eden_size,
1507                                          full,
1508                                          gc_cause,
1509                                          heap->soft_ref_policy());
1510 
1511   // Reset the expansion cause, now that we just completed
1512   // a collection cycle.
1513   clear_expansion_cause();
1514   _foregroundGCIsActive = false;
1515   return;
1516 }
1517 
1518 // Resize the tenured generation
1519 // after obtaining the free list locks for the
1520 // two generations.
1521 void CMSCollector::compute_new_size() {
1522   assert_locked_or_safepoint(Heap_lock);
1523   FreelistLocker z(this);
1524   MetaspaceGC::compute_new_size();
1525   _cmsGen->compute_new_size_free_list();
1526 }
1527 
1528 // A work method used by the foreground collector to do
1529 // a mark-sweep-compact.
1530 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1531   CMSHeap* heap = CMSHeap::heap();
1532 
1533   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1534   gc_timer->register_gc_start();
1535 
1536   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1537   gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
1538 
1539   heap->pre_full_gc_dump(gc_timer);
1540 
1541   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1542 
1543   // Temporarily widen the span of the weak reference processing to
1544   // the entire heap.
1545   MemRegion new_span(CMSHeap::heap()->reserved_region());


1728       register_gc_start(cause);
1729       // Reset the expansion cause, now that we are about to begin
1730       // a new cycle.
1731       clear_expansion_cause();
1732 
1733       // Clear the MetaspaceGC flag since a concurrent collection
1734       // is starting but also clear it after the collection.
1735       MetaspaceGC::set_should_concurrent_collect(false);
1736     }
1737     // Decide if we want to enable class unloading as part of the
1738     // ensuing concurrent GC cycle.
1739     update_should_unload_classes();
1740     _full_gc_requested = false;           // acks all outstanding full gc requests
1741     _full_gc_cause = GCCause::_no_gc;
1742     // Signal that we are about to start a collection
1743     heap->increment_total_full_collections();  // ... starting a collection cycle
1744     _collection_count_start = heap->total_full_collections();
1745   }
1746 
1747   size_t prev_used = _cmsGen->used();

1748 
1749   // The change of the collection state is normally done at this level;
1750   // the exceptions are phases that are executed while the world is
1751   // stopped.  For those phases the change of state is done while the
1752   // world is stopped.  For baton passing purposes this allows the
1753   // background collector to finish the phase and change state atomically.
1754   // The foreground collector cannot wait on a phase that is done
1755   // while the world is stopped because the foreground collector already
1756   // has the world stopped and would deadlock.
1757   while (_collectorState != Idling) {
1758     log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1759                          p2i(Thread::current()), _collectorState);
1760     // The foreground collector
1761     //   holds the Heap_lock throughout its collection.
1762     //   holds the CMS token (but not the lock)
1763     //     except while it is waiting for the background collector to yield.
1764     //
1765     // The foreground collector should be blocked (not for long)
1766     //   if the background collector is about to start a phase
1767     //   executed with world stopped.  If the background


2034   // there are any modified oops in the class. The remark phase also needs
2035   // that information. Tell the young collection to save the union of all
2036   // modified klasses.
2037   if (duringMarking) {
2038     _ct->cld_rem_set()->set_accumulate_modified_oops(true);
2039   }
2040 
2041   bool registerClosure = duringMarking;
2042 
2043   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2044 
2045   if (!full) {
2046     stats().record_gc0_begin();
2047   }
2048 }
2049 
2050 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2051 
2052   _capacity_at_prologue = capacity();
2053   _used_at_prologue = used();

2054 
2055   // We enable promotion tracking so that card-scanning can recognize
2056   // which objects have been promoted during this GC and skip them.
2057   for (uint i = 0; i < ParallelGCThreads; i++) {
2058     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2059   }
2060 
2061   // Delegate to CMSCollector which knows how to coordinate between
2062   // this and any other CMS generations that it is responsible for
2063   // collecting.
2064   collector()->gc_prologue(full);
2065 }
2066 
2067 // This is a "private" interface for use by this generation's CMSCollector.
2068 // Not to be called directly by any other entity (for instance,
2069 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2070 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2071   bool registerClosure, ModUnionClosure* modUnionClosure) {
2072   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2073   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,


2106     // We have already been invoked; this is a gc_epilogue delegation
2107     // from yet another CMS generation that we are responsible for, just
2108     // ignore it since all relevant work has already been done.
2109     return;
2110   }
2111   assert(haveFreelistLocks(), "must have freelist locks");
2112   assert_lock_strong(bitMapLock());
2113 
2114   _ct->cld_rem_set()->set_accumulate_modified_oops(false);
2115 
2116   _cmsGen->gc_epilogue_work(full);
2117 
2118   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2119     // in case sampling was not already enabled, enable it
2120     _start_sampling = true;
2121   }
2122   // reset _eden_chunk_array so sampling starts afresh
2123   _eden_chunk_index = 0;
2124 
2125   size_t cms_used   = _cmsGen->cmsSpace()->used();

2126 
2127   // update performance counters - this uses a special version of
2128   // update_counters() that allows the utilization to be passed as a
2129   // parameter, avoiding multiple calls to used().
2130   //
2131   _cmsGen->update_counters(cms_used);
2132 
2133   bitMapLock()->unlock();
2134   releaseFreelistLocks();
2135 
2136   if (!CleanChunkPoolAsync) {
2137     Chunk::clean_chunk_pool();
2138   }
2139 
2140   set_did_compact(false);
2141   _between_prologue_and_epilogue = false;  // ready for next cycle
2142 }
2143 
2144 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2145   collector()->gc_epilogue(full);


2799 // be approximate -- we'll do a catch up phase subsequently.]
2800 void CMSCollector::checkpointRootsInitial() {
2801   assert(_collectorState == InitialMarking, "Wrong collector state");
2802   check_correct_thread_executing();
2803   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
2804 
2805   save_heap_summary();
2806   report_heap_summary(GCWhen::BeforeGC);
2807 
2808   ReferenceProcessor* rp = ref_processor();
2809   assert(_restart_addr == NULL, "Control point invariant");
2810   {
2811     // acquire locks for subsequent manipulations
2812     MutexLocker x(bitMapLock(),
2813                   Mutex::_no_safepoint_check_flag);
2814     checkpointRootsInitialWork();
2815     // enable ("weak") refs discovery
2816     rp->enable_discovery();
2817     _collectorState = Marking;
2818   }
2819 }
2820 
2821 void CMSCollector::checkpointRootsInitialWork() {
2822   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2823   assert(_collectorState == InitialMarking, "just checking");
2824 
2825   // Already have locks.
2826   assert_lock_strong(bitMapLock());
2827   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2828 
2829   // Setup the verification and class unloading state for this
2830   // CMS collection cycle.
2831   setup_cms_unloading_and_verification_state();
2832 
2833   GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2834 
2835   // Reset all the PLAB chunk arrays if necessary.
2836   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2837     reset_survivor_plab_arrays();
2838   }


4160   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4161                 _young_gen->used() / K, _young_gen->capacity() / K);
4162   {
4163     if (CMSScavengeBeforeRemark) {
4164       CMSHeap* heap = CMSHeap::heap();
4165       // Temporarily set flag to false, GCH->do_collection will
4166       // expect it to be false and set to true
4167       FlagSetting fl(heap->_is_gc_active, false);
4168 
4169       heap->do_collection(true,                      // full (i.e. force, see below)
4170                           false,                     // !clear_all_soft_refs
4171                           0,                         // size
4172                           false,                     // is_tlab
4173                           GenCollectedHeap::YoungGen // type
4174         );
4175     }
4176     FreelistLocker x(this);
4177     MutexLocker y(bitMapLock(),
4178                   Mutex::_no_safepoint_check_flag);
4179     checkpointRootsFinalWork();

4180   }
4181   verify_work_stacks_empty();
4182   verify_overflow_empty();
4183 }
4184 
4185 void CMSCollector::checkpointRootsFinalWork() {
4186   GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4187 
4188   assert(haveFreelistLocks(), "must have free list locks");
4189   assert_lock_strong(bitMapLock());
4190 
4191   ResourceMark rm;
4192   HandleMark   hm;
4193 
4194   CMSHeap* heap = CMSHeap::heap();
4195 
4196   assert(haveFreelistLocks(), "must have free list locks");
4197   assert_lock_strong(bitMapLock());
4198 
4199   // We might assume that we need not fill TLAB's when


5320 
5321   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5322   _intra_sweep_timer.reset();
5323   _intra_sweep_timer.start();
5324   {
5325     GCTraceCPUTime tcpu;
5326     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5327     // First sweep the old gen
5328     {
5329       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5330                                bitMapLock());
5331       sweepWork(_cmsGen);
5332     }
5333 
5334     // Update Universe::_heap_*_at_gc figures.
5335     // We need all the free list locks to make the abstract state
5336     // transition from Sweeping to Resetting. See detailed note
5337     // further below.
5338     {
5339       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());

5340       // Update heap occupancy information which is used as
5341       // input to soft ref clearing policy at the next gc.
5342       Universe::update_heap_info_at_gc();
5343       _collectorState = Resizing;
5344     }
5345   }
5346   verify_work_stacks_empty();
5347   verify_overflow_empty();
5348 
5349   if (should_unload_classes()) {
5350     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5351     // requires that the virtual spaces are stable and not deleted.
5352     ClassLoaderDataGraph::set_should_purge(true);
5353   }
5354 
5355   _intra_sweep_timer.stop();
5356   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5357 
5358   _inter_sweep_timer.reset();
5359   _inter_sweep_timer.start();
5360 
5361   // We need to use a monotonically non-decreasing time in ms
5362   // or we will see time-warp warnings and os::javaTimeMillis()


5411   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5412 }
5413 
5414 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5415   return addr >= _cmsSpace->nearLargestChunk();
5416 }
5417 
5418 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5419   return _cmsSpace->find_chunk_at_end();
5420 }
5421 
5422 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5423                                                     bool full) {
5424   // If the young generation has been collected, gather any statistics
5425   // that are of interest at this point.
5426   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
5427   if (!full && current_is_young) {
5428     // Gather statistics on the young generation collection.
5429     collector()->stats().record_gc0_end(used());
5430   }

5431 }
5432 
5433 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5434   // We iterate over the space(s) underlying this generation,
5435   // checking the mark bit map to see if the bits corresponding
5436   // to specific blocks are marked or not. Blocks that are
5437   // marked are live and are not swept up. All remaining blocks
5438   // are swept up, with coalescing on-the-fly as we sweep up
5439   // contiguous free and/or garbage blocks:
5440   // We need to ensure that the sweeper synchronizes with allocators
5441   // and stop-the-world collectors. In particular, the following
5442   // locks are used:
5443   // . CMS token: if this is held, a stop the world collection cannot occur
5444   // . freelistLock: if this is held no allocation can occur from this
5445   //                 generation by another thread
5446   // . bitMapLock: if this is held, no other thread can access or update
5447   //
5448 
5449   // Note that we need to hold the freelistLock if we use
5450   // block iterate below; else the iterator might go awry if




 675 void ConcurrentMarkSweepGeneration::print_statistics() {
 676   cmsSpace()->printFLCensus(0);
 677 }
 678 #endif
 679 
 680 size_t
 681 ConcurrentMarkSweepGeneration::contiguous_available() const {
 682   // dld proposes an improvement in precision here. If the committed
 683   // part of the space ends in a free block we should add that to
 684   // uncommitted size in the calculation below. Will make this
 685   // change later, staying with the approximation below for the
 686   // time being. -- ysr.
 687   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 688 }
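
The comment above sketches a more precise calculation than the approximation that is kept here: if the committed part of the space ends in a free block, its size could be added to the uncommitted size before taking the maximum. A minimal sketch of that idea, where size_of_trailing_free_block() is a hypothetical helper (not present in this file) returning the byte size of a free block abutting the committed boundary, or 0 otherwise:

    // Illustrative only -- size_of_trailing_free_block() is a hypothetical helper,
    // not part of this change; the shipped code keeps the approximation above.
    size_t ConcurrentMarkSweepGeneration::contiguous_available() const {
      size_t trailing_free = _cmsSpace->size_of_trailing_free_block();
      return MAX2(_virtual_space.uncommitted_size() + trailing_free,
                  unsafe_max_alloc_nogc());
    }
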
 689 
 690 size_t
 691 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 692   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 693 }
 694 
 695 size_t ConcurrentMarkSweepGeneration::used_stable() const {
 696   return cmsSpace()->used_stable();
 697 }
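
This new accessor appears to expose a cached, consistent value maintained by CompactibleFreeListSpace and refreshed by the recalculate_used_stable() calls added elsewhere in this change (the CompactibleFreeListSpace side is not shown in this file). A rough sketch of the intended pattern, where the _used_stable field name and the method bodies are assumptions for illustration, not the actual implementation:

    // Illustrative sketch only; field name and bodies are assumed.
    // _used_stable caches used() at points where the free lists are consistent
    // (e.g. under the freelist lock or at a safepoint), so readers between
    // recalculations see a stable value rather than one changing concurrently
    // with sweeping.
    void CompactibleFreeListSpace::recalculate_used_stable() {
      _used_stable = used();
    }

    size_t CompactibleFreeListSpace::used_stable() const {
      return _used_stable;
    }
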
 698 
 699 size_t ConcurrentMarkSweepGeneration::max_available() const {
 700   return free() + _virtual_space.uncommitted_size();
 701 }
 702 
 703 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 704   size_t available = max_available();
 705   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 706   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 707   log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
 708                            res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
 709   return res;
 710 }
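
For context, a hedged sketch of how a caller on the young-generation side might consult this check when deciding whether a scavenge can proceed without risking promotion failure; the wrapper and its parameter names are illustrative and not taken from this webrev:

    // Illustrative caller, not part of this change. Conservatively treats
    // everything currently live in the young generation as the worst-case
    // promotion volume. Assumes the Generation declarations from generation.hpp.
    bool scavenge_is_safe(Generation* old_gen, size_t young_used_bytes) {
      return old_gen->promotion_attempt_is_safe(young_used_bytes);
    }
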
 711 
 712 // At a promotion failure dump information on block layout in heap
 713 // (cms old generation).
 714 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 715   Log(gc, promotion) log;
 716   if (log.is_trace()) {
 717     LogStream ls(log.trace());
 718     cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);


1510                                          max_eden_size,
1511                                          full,
1512                                          gc_cause,
1513                                          heap->soft_ref_policy());
1514 
1515   // Reset the expansion cause, now that we just completed
1516   // a collection cycle.
1517   clear_expansion_cause();
1518   _foregroundGCIsActive = false;
1519   return;
1520 }
1521 
1522 // Resize the tenured generation
1523 // after obtaining the free list locks for the
1524 // two generations.
1525 void CMSCollector::compute_new_size() {
1526   assert_locked_or_safepoint(Heap_lock);
1527   FreelistLocker z(this);
1528   MetaspaceGC::compute_new_size();
1529   _cmsGen->compute_new_size_free_list();
1530   // recalculate CMS used space after CMS collection
1531   _cmsGen->cmsSpace()->recalculate_used_stable();
1532 }
1533 
1534 // A work method used by the foreground collector to do
1535 // a mark-sweep-compact.
1536 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1537   CMSHeap* heap = CMSHeap::heap();
1538 
1539   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1540   gc_timer->register_gc_start();
1541 
1542   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1543   gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
1544 
1545   heap->pre_full_gc_dump(gc_timer);
1546 
1547   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1548 
1549   // Temporarily widen the span of the weak reference processing to
1550   // the entire heap.
1551   MemRegion new_span(CMSHeap::heap()->reserved_region());


1734       register_gc_start(cause);
1735       // Reset the expansion cause, now that we are about to begin
1736       // a new cycle.
1737       clear_expansion_cause();
1738 
1739       // Clear the MetaspaceGC flag since a concurrent collection
1740       // is starting but also clear it after the collection.
1741       MetaspaceGC::set_should_concurrent_collect(false);
1742     }
1743     // Decide if we want to enable class unloading as part of the
1744     // ensuing concurrent GC cycle.
1745     update_should_unload_classes();
1746     _full_gc_requested = false;           // acks all outstanding full gc requests
1747     _full_gc_cause = GCCause::_no_gc;
1748     // Signal that we are about to start a collection
1749     heap->increment_total_full_collections();  // ... starting a collection cycle
1750     _collection_count_start = heap->total_full_collections();
1751   }
1752 
1753   size_t prev_used = _cmsGen->used();
1754   _cmsGen->cmsSpace()->recalculate_used_stable();
1755 
1756   // The change of the collection state is normally done at this level;
1757   // the exceptions are phases that are executed while the world is
1758   // stopped.  For those phases the change of state is done while the
1759   // world is stopped.  For baton passing purposes this allows the
1760   // background collector to finish the phase and change state atomically.
1761   // The foreground collector cannot wait on a phase that is done
1762   // while the world is stopped because the foreground collector already
1763   // has the world stopped and would deadlock.
1764   while (_collectorState != Idling) {
1765     log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1766                          p2i(Thread::current()), _collectorState);
1767     // The foreground collector
1768     //   holds the Heap_lock throughout its collection.
1769     //   holds the CMS token (but not the lock)
1770     //     except while it is waiting for the background collector to yield.
1771     //
1772     // The foreground collector should be blocked (not for long)
1773     //   if the background collector is about to start a phase
1774     //   executed with world stopped.  If the background


2041   // there are any modified oops in the class. The remark phase also needs
2042   // that information. Tell the young collection to save the union of all
2043   // modified klasses.
2044   if (duringMarking) {
2045     _ct->cld_rem_set()->set_accumulate_modified_oops(true);
2046   }
2047 
2048   bool registerClosure = duringMarking;
2049 
2050   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2051 
2052   if (!full) {
2053     stats().record_gc0_begin();
2054   }
2055 }
2056 
2057 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2058 
2059   _capacity_at_prologue = capacity();
2060   _used_at_prologue = used();
2061   _cmsSpace->recalculate_used_stable();
2062 
2063   // We enable promotion tracking so that card-scanning can recognize
2064   // which objects have been promoted during this GC and skip them.
2065   for (uint i = 0; i < ParallelGCThreads; i++) {
2066     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2067   }
2068 
2069   // Delegate to CMSCollector which knows how to coordinate between
2070   // this and any other CMS generations that it is responsible for
2071   // collecting.
2072   collector()->gc_prologue(full);
2073 }
2074 
2075 // This is a "private" interface for use by this generation's CMSCollector.
2076 // Not to be called directly by any other entity (for instance,
2077 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2078 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2079   bool registerClosure, ModUnionClosure* modUnionClosure) {
2080   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2081   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,


2114     // We have already been invoked; this is a gc_epilogue delegation
2115     // from yet another CMS generation that we are responsible for, just
2116     // ignore it since all relevant work has already been done.
2117     return;
2118   }
2119   assert(haveFreelistLocks(), "must have freelist locks");
2120   assert_lock_strong(bitMapLock());
2121 
2122   _ct->cld_rem_set()->set_accumulate_modified_oops(false);
2123 
2124   _cmsGen->gc_epilogue_work(full);
2125 
2126   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2127     // in case sampling was not already enabled, enable it
2128     _start_sampling = true;
2129   }
2130   // reset _eden_chunk_array so sampling starts afresh
2131   _eden_chunk_index = 0;
2132 
2133   size_t cms_used   = _cmsGen->cmsSpace()->used();
2134   _cmsGen->cmsSpace()->recalculate_used_stable();
2135 
2136   // update performance counters - this uses a special version of
2137   // update_counters() that allows the utilization to be passed as a
2138   // parameter, avoiding multiple calls to used().
2139   //
2140   _cmsGen->update_counters(cms_used);
2141 
2142   bitMapLock()->unlock();
2143   releaseFreelistLocks();
2144 
2145   if (!CleanChunkPoolAsync) {
2146     Chunk::clean_chunk_pool();
2147   }
2148 
2149   set_did_compact(false);
2150   _between_prologue_and_epilogue = false;  // ready for next cycle
2151 }
2152 
2153 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2154   collector()->gc_epilogue(full);


2808 // be approximate -- we'll do a catch up phase subsequently.]
2809 void CMSCollector::checkpointRootsInitial() {
2810   assert(_collectorState == InitialMarking, "Wrong collector state");
2811   check_correct_thread_executing();
2812   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
2813 
2814   save_heap_summary();
2815   report_heap_summary(GCWhen::BeforeGC);
2816 
2817   ReferenceProcessor* rp = ref_processor();
2818   assert(_restart_addr == NULL, "Control point invariant");
2819   {
2820     // acquire locks for subsequent manipulations
2821     MutexLocker x(bitMapLock(),
2822                   Mutex::_no_safepoint_check_flag);
2823     checkpointRootsInitialWork();
2824     // enable ("weak") refs discovery
2825     rp->enable_discovery();
2826     _collectorState = Marking;
2827   }
2828 
2829   _cmsGen->cmsSpace()->recalculate_used_stable();
2830 }
2831 
2832 void CMSCollector::checkpointRootsInitialWork() {
2833   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2834   assert(_collectorState == InitialMarking, "just checking");
2835 
2836   // Already have locks.
2837   assert_lock_strong(bitMapLock());
2838   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2839 
2840   // Setup the verification and class unloading state for this
2841   // CMS collection cycle.
2842   setup_cms_unloading_and_verification_state();
2843 
2844   GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2845 
2846   // Reset all the PLAB chunk arrays if necessary.
2847   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2848     reset_survivor_plab_arrays();
2849   }


4171   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4172                 _young_gen->used() / K, _young_gen->capacity() / K);
4173   {
4174     if (CMSScavengeBeforeRemark) {
4175       CMSHeap* heap = CMSHeap::heap();
4176       // Temporarily set flag to false, GCH->do_collection will
4177       // expect it to be false and set to true
4178       FlagSetting fl(heap->_is_gc_active, false);
4179 
4180       heap->do_collection(true,                      // full (i.e. force, see below)
4181                           false,                     // !clear_all_soft_refs
4182                           0,                         // size
4183                           false,                     // is_tlab
4184                           GenCollectedHeap::YoungGen // type
4185         );
4186     }
4187     FreelistLocker x(this);
4188     MutexLocker y(bitMapLock(),
4189                   Mutex::_no_safepoint_check_flag);
4190     checkpointRootsFinalWork();
4191     _cmsGen->cmsSpace()->recalculate_used_stable();
4192   }
4193   verify_work_stacks_empty();
4194   verify_overflow_empty();
4195 }
4196 
4197 void CMSCollector::checkpointRootsFinalWork() {
4198   GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4199 
4200   assert(haveFreelistLocks(), "must have free list locks");
4201   assert_lock_strong(bitMapLock());
4202 
4203   ResourceMark rm;
4204   HandleMark   hm;
4205 
4206   CMSHeap* heap = CMSHeap::heap();
4207 
4208   assert(haveFreelistLocks(), "must have free list locks");
4209   assert_lock_strong(bitMapLock());
4210 
4211   // We might assume that we need not fill TLAB's when


5332 
5333   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5334   _intra_sweep_timer.reset();
5335   _intra_sweep_timer.start();
5336   {
5337     GCTraceCPUTime tcpu;
5338     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5339     // First sweep the old gen
5340     {
5341       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5342                                bitMapLock());
5343       sweepWork(_cmsGen);
5344     }
5345 
5346     // Update Universe::_heap_*_at_gc figures.
5347     // We need all the free list locks to make the abstract state
5348     // transition from Sweeping to Resetting. See detailed note
5349     // further below.
5350     {
5351       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5352
5353       // Update heap occupancy information which is used as
5354       // input to soft ref clearing policy at the next gc.
5355       Universe::update_heap_info_at_gc();
5356
5357       // recalculate CMS used space after CMS collection
5358       _cmsGen->cmsSpace()->recalculate_used_stable();
5359
5360       _collectorState = Resizing;
5361     }
5362   }
5363   verify_work_stacks_empty();
5364   verify_overflow_empty();
5365 
5366   if (should_unload_classes()) {
5367     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5368     // requires that the virtual spaces are stable and not deleted.
5369     ClassLoaderDataGraph::set_should_purge(true);
5370   }
5371 
5372   _intra_sweep_timer.stop();
5373   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5374 
5375   _inter_sweep_timer.reset();
5376   _inter_sweep_timer.start();
5377 
5378   // We need to use a monotonically non-decreasing time in ms
5379   // or we will see time-warp warnings and os::javaTimeMillis()


5428   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5429 }
5430 
5431 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5432   return addr >= _cmsSpace->nearLargestChunk();
5433 }
5434 
5435 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5436   return _cmsSpace->find_chunk_at_end();
5437 }
5438 
5439 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5440                                                     bool full) {
5441   // If the young generation has been collected, gather any statistics
5442   // that are of interest at this point.
5443   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
5444   if (!full && current_is_young) {
5445     // Gather statistics on the young generation collection.
5446     collector()->stats().record_gc0_end(used());
5447   }
5448   _cmsSpace->recalculate_used_stable();
5449 }
5450 
5451 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5452   // We iterate over the space(s) underlying this generation,
5453   // checking the mark bit map to see if the bits corresponding
5454   // to specific blocks are marked or not. Blocks that are
5455   // marked are live and are not swept up. All remaining blocks
5456   // are swept up, with coalescing on-the-fly as we sweep up
5457   // contiguous free and/or garbage blocks:
5458   // We need to ensure that the sweeper synchronizes with allocators
5459   // and stop-the-world collectors. In particular, the following
5460   // locks are used:
5461   // . CMS token: if this is held, a stop the world collection cannot occur
5462   // . freelistLock: if this is held no allocation can occur from this
5463   //                 generation by another thread
5464   // . bitMapLock: if this is held, no other thread can access or update
5465   //
5466 
5467   // Note that we need to hold the freelistLock if we use
5468   // block iterate below; else the iterator might go awry if