
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

rev 9733 : [mq]: webrev.00
rev 9734 : [mq]: webrev.01


2725  private:
2726   CMSCollector *_collector;
2727   const char *_title;
2728   GCTraceConcTime(Info, gc) _trace_time;
2729 
2730  public:
2731   // Not MT-safe; so do not pass around these StackObj's
2732   // where they may be accessed by other threads.
2733   jlong wallclock_millis() {
2734     return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2735   }
2736 };
2737 
2738 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2739                                        const char *title) :
2740   _collector(collector), _title(title), _trace_time(title) {
2741 
2742   _collector->resetYields();
2743   _collector->resetTimer();
2744   _collector->startTimer();

2745 }
2746 
2747 CMSPhaseAccounting::~CMSPhaseAccounting() {

2748   _collector->stopTimer();
2749   log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2750   log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2751 }
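
A minimal sketch of the RAII idiom CMSPhaseAccounting relies on, using stand-in names (SketchCollector, PhaseScope) rather than the HotSpot types: constructing the object on the stack starts the phase accounting, and its destructor reports the elapsed time even on early returns.

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for the collector's timer/yield bookkeeping.
struct SketchCollector {
  std::chrono::steady_clock::time_point _start;
  int _yields;
  void resetYields() { _yields = 0; }
  void startTimer()  { _start = std::chrono::steady_clock::now(); }
  double elapsedMillis() const {
    return std::chrono::duration<double, std::milli>(
        std::chrono::steady_clock::now() - _start).count();
  }
};

// Stack-allocated phase scope, mirroring CMSPhaseAccounting above:
// construction resets and starts the accounting, destruction reports it.
class PhaseScope {
  SketchCollector* _c;
  const char*      _title;
 public:
  PhaseScope(SketchCollector* c, const char* title) : _c(c), _title(title) {
    _c->resetYields();
    _c->startTimer();
  }
  ~PhaseScope() {
    printf("Concurrent active time (%s): %.3fms\n", _title, _c->elapsedMillis());
  }
};
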
2752 
2753 // CMS work
2754 
2755 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2756 class CMSParMarkTask : public AbstractGangTask {
2757  protected:
2758   CMSCollector*     _collector;
2759   uint              _n_workers;
2760   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2761       AbstractGangTask(name),
2762       _collector(collector),
2763       _n_workers(n_workers) {}
2764   // Work method in support of parallel rescan ... of young gen spaces
2765   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2766                              ContiguousSpace* space,
2767                              HeapWord** chunk_array, size_t chunk_top);
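
How a task like this is consumed, reduced to a stand-alone sketch; SketchGangTask and SketchParMarkTask are illustrative stand-ins, not the HotSpot AbstractGangTask hierarchy. Each gang worker invokes work(worker_id) once, and the shared state lives in fields of the task object.

#include <cstdio>

// Illustrative stand-in for AbstractGangTask: each worker thread of a
// gang calls work(worker_id) exactly once on the shared task object.
class SketchGangTask {
  const char* _name;
 public:
  explicit SketchGangTask(const char* name) : _name(name) {}
  virtual ~SketchGangTask() {}
  virtual void work(unsigned worker_id) = 0;
  const char* name() const { return _name; }
};

// Shape of a CMSParMarkTask-style subclass: shared state is carried in
// fields, and work() partitions the job by worker_id.
class SketchParMarkTask : public SketchGangTask {
  unsigned _n_workers;
 public:
  explicit SketchParMarkTask(unsigned n_workers)
    : SketchGangTask("sketch-par-mark"), _n_workers(n_workers) {}
  virtual void work(unsigned worker_id) {
    printf("%s: worker %u of %u rescans its slice of the young gen\n",
           name(), worker_id, _n_workers);
  }
};
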


2891        "Was cleared in most recent final checkpoint phase"
2892        " or no bits are set in the gc_prologue before the start of the "
2893        "subsequent marking phase.");
2894 
2895   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2896 
2897   // Save the end of the used_region of the constituent generations
2898   // to be used to limit the extent of sweep in each generation.
2899   save_sweep_limits();
2900   verify_overflow_empty();
2901 }
2902 
2903 bool CMSCollector::markFromRoots() {
2904   // we might be tempted to assert that:
2905   // assert(!SafepointSynchronize::is_at_safepoint(),
2906   //        "inconsistent argument?");
2907   // However that wouldn't be right, because it's possible that
2908   // a safepoint is indeed in progress as a young generation
2909   // stop-the-world GC happens even as we mark in this generation.
2910   assert(_collectorState == Marking, "inconsistent state?");
2911   _gc_timer_cm->register_gc_concurrent_start("Concurrent Mark");
2912   check_correct_thread_executing();
2913   verify_overflow_empty();
2914 
2915   // Weak ref discovery note: We may be discovering weak
2916   // refs in this generation concurrent (but interleaved) with
2917   // weak ref discovery by the young generation collector.
2918 
2919   CMSTokenSyncWithLocks ts(true, bitMapLock());
2920   GCTraceCPUTime tcpu;
2921   CMSPhaseAccounting pa(this, "Concurrent Mark");
2922   bool res = markFromRootsWork();
2923   if (res) {
2924     _collectorState = Precleaning;
2925   } else { // We failed and a foreground collection wants to take over
2926     assert(_foregroundGCIsActive, "internal state inconsistency");
2927     assert(_restart_addr == NULL, "foreground will restart from scratch");
2928     log_debug(gc)("bailing out to foreground collection");
2929   }
2930   verify_overflow_empty();
2931   _gc_timer_cm->register_gc_concurrent_end();
2932   return res;
2933 }
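
The state transition above, restated as a small sketch over a simplified subset of the collector states (the real CollectorState enum has more values): a successful mark advances the cycle, while failure leaves the state for the foreground collector.

// Simplified subset of the collector states (the real enum also has
// e.g. InitialMarking, AbortablePreclean, FinalMarking):
enum SketchState { Idling, Marking, Precleaning, Sweeping, Resetting };

// Mirrors the transition in markFromRoots(): a successful concurrent
// mark advances the cycle to Precleaning; on failure the state is left
// untouched for the foreground collector to take over.
SketchState after_mark_from_roots(SketchState current, bool mark_succeeded) {
  return mark_succeeded ? Precleaning : current;
}
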
2934 
2935 bool CMSCollector::markFromRootsWork() {
2936   // iterate over marked bits in bit map, doing a full scan and mark
2937   // from these roots using the following algorithm:
2938   // . if oop is to the right of the current scan pointer,
2939   //   mark corresponding bit (we'll process it later)
2940   // . else (oop is to left of current scan pointer)
2941   //   push oop on marking stack
2942   // . drain the marking stack
2943 
2944   // Note that when we do a marking step we need to hold the
2945   // bit map lock -- recall that direct allocation (by mutators)
2946   // and promotion (by the young generation collector) is also
2947   // marking the bit map. [the so-called allocate live policy.]
2948   // Because the implementation of bit map marking is not
2949   // robust wrt simultaneous marking of bits in the same word,
2950   // we need to make sure that there is no interference
2951   // between such concurrent updates.
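
A toy rendering of the scan-pointer discipline this comment describes, with addresses reduced to indices into a bitmap and the marking stack a std::vector; illustrative only, assuming a precomputed refs table in place of real object scanning.

#include <vector>
#include <cstddef>

// Toy rendering of the bitmap scan: 'finger' is the current scan
// position. A newly discovered reference ahead of the finger only gets
// its bit set (the outer loop will reach it); one behind the finger
// must be pushed and drained now, or it would never be scanned.
void sketch_mark_from_roots(std::vector<bool>& mark_bits,
                            const std::vector<std::vector<size_t> >& refs) {
  std::vector<size_t> stack;                   // the marking stack
  for (size_t finger = 0; finger < mark_bits.size(); ++finger) {
    if (!mark_bits[finger]) continue;          // not (yet) marked live
    stack.push_back(finger);
    while (!stack.empty()) {                   // drain the marking stack
      size_t obj = stack.back();
      stack.pop_back();
      for (size_t i = 0; i < refs[obj].size(); ++i) {
        size_t target = refs[obj][i];
        if (mark_bits[target]) continue;       // already marked
        mark_bits[target] = true;              // mark the corresponding bit
        if (target < finger) {                 // behind the finger:
          stack.push_back(target);             //   trace it now
        }                                      // ahead: processed later
      }
    }
  }
}
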


5278     assert(t->is_ConcurrentGC_thread(),
5279            "Should be CMS thread");
5280   } else {
5281     // We can be the CMS thread only if we are in a stop-world
5282     // phase of CMS collection.
5283     if (t->is_ConcurrentGC_thread()) {
5284       assert(_collectorState == InitialMarking ||
5285              _collectorState == FinalMarking,
5286              "Should be a stop-world phase");
5287       // The CMS thread should be holding the CMS_token.
5288       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5289              "Potential interference with concurrently "
5290              "executing VM thread");
5291     }
5292   }
5293 }
5294 #endif
5295 
5296 void CMSCollector::sweep() {
5297   assert(_collectorState == Sweeping, "just checking");
5298   _gc_timer_cm->register_gc_concurrent_start("Concurrent Sweep");
5299   check_correct_thread_executing();
5300   verify_work_stacks_empty();
5301   verify_overflow_empty();
5302   increment_sweep_count();
5303   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5304 
5305   _inter_sweep_timer.stop();
5306   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5307 
5308   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5309   _intra_sweep_timer.reset();
5310   _intra_sweep_timer.start();
5311   {
5312     GCTraceCPUTime tcpu;
5313     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5314     // First sweep the old gen
5315     {
5316       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5317                                bitMapLock());
5318       sweepWork(_cmsGen);
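
The inter/intra timer hand-off above, sketched with a stand-in exponential average in place of the real sampling estimator (the 0.75/0.25 weights here are made up; HotSpot's AdaptiveWeightedAverage uses configurable weights): exactly one timer runs at a time, and each stopped interval feeds its own estimate.

// Stand-in for the inter/intra sweep accounting: exactly one of the two
// timers runs at any moment, and each stopped interval is sampled into
// its own smoothed estimate.
struct SketchSweepTiming {
  double _inter_estimate;   // smoothed seconds between sweeps
  double _intra_estimate;   // smoothed seconds spent sweeping

  void on_sweep_start(double inter_seconds) {
    // The inter-sweep timer just stopped; sample it before the
    // intra-sweep timer starts.
    _inter_estimate = 0.75 * _inter_estimate + 0.25 * inter_seconds;
  }
  void on_sweep_end(double intra_seconds) {
    // The intra-sweep timer just stopped; sample it before the
    // inter-sweep timer restarts.
    _intra_estimate = 0.75 * _intra_estimate + 0.25 * intra_seconds;
  }
};
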


5359   // and out of the Sweeping state must be synchronously visible
5360   // globally to the mutators.
5361   // The transition into the Marking state happens with the world
5362   // stopped so the mutators will globally see it.  Sweeping is
5363   // done asynchronously by the background collector so the transition
5364   // from the Sweeping state to the Resizing state must be done
5365   // under the freelistLock (as is the check for whether to
5366   // allocate-live and whether to dirty the mod-union table).
5367   assert(_collectorState == Resizing, "Change of collector state to"
5368     " Resizing must be done under the freelistLocks (plural)");
5369 
5370   // Now that sweeping has been completed, we clear
5371   // the incremental_collection_failed flag,
5372   // thus inviting a younger gen collection to promote into
5373   // this generation. If such a promotion may still fail,
5374   // the flag will be set again when a young collection is
5375   // attempted.
5376   GenCollectedHeap* gch = GenCollectedHeap::heap();
5377   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5378   gch->update_full_collections_completed(_collection_count_start);
5379   _gc_timer_cm->register_gc_concurrent_end();
5380 }
5381 
5382 // FIX ME!!! Looks like this belongs in CFLSpace, with
5383 // CMSGen merely delegating to it.
5384 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5385   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5386   HeapWord*  minAddr        = _cmsSpace->bottom();
5387   HeapWord*  largestAddr    =
5388     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5389   if (largestAddr == NULL) {
5390     // The dictionary appears to be empty.  In this case
5391     // try to coalesce at the end of the heap.
5392     largestAddr = _cmsSpace->end();
5393   }
5394   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5395   size_t nearLargestOffset =
5396     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5397   log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5398                           p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5399   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
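
A worked instance of the offset arithmetic above, with hypothetical sizes; the proximity factor and minimum chunk size are stand-ins for FLSLargestBlockCoalesceProximity and MinChunkSize, whose actual values depend on the build and flags.

#include <cstdio>
#include <cstddef>

int main() {
  // Hypothetical numbers: the largest free block starts 400M words
  // past the bottom of the old gen; proximity factor as sketched.
  const double nearLargestPercent = 0.99;   // stand-in for the flag value
  const size_t MinChunkSizeWords  = 4;      // stand-in for MinChunkSize
  size_t largestOffset = 400u * 1024 * 1024;

  // Same arithmetic as setNearLargestChunk(): back off from the largest
  // block by (1 - percent) of its offset, then by one minimum chunk.
  size_t nearLargestOffset =
      (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSizeWords;
  printf("sweep coalescing begins %zu words before the largest block\n",
         largestOffset - nearLargestOffset);
  return 0;
}
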


5470   if (should_unload_classes()) {                // unloaded classes this cycle,
5471     _concurrent_cycles_since_last_unload = 0;   // ... reset count
5472   } else {                                      // did not unload classes,
5473     _concurrent_cycles_since_last_unload++;     // ... increment count
5474   }
5475 }
5476 
5477 // Reset CMS data structures (for now just the marking bit map)
5478 // preparatory for the next cycle.
5479 void CMSCollector::reset_concurrent() {
5480   CMSTokenSyncWithLocks ts(true, bitMapLock());
5481 
5482   // If the state is not "Resetting", the foreground thread
5483   // has done a collection and the resetting.
5484   if (_collectorState != Resetting) {
5485     assert(_collectorState == Idling, "The state should only change"
5486       " because the foreground collector has finished the collection");
5487     return;
5488   }
5489 

5490   // Clear the mark bitmap (no grey objects to start with)
5491   // for the next cycle.
5492   GCTraceCPUTime tcpu;
5493   CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5494 
5495   HeapWord* curAddr = _markBitMap.startWord();
5496   while (curAddr < _markBitMap.endWord()) {
5497     size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5498     MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5499     _markBitMap.clear_large_range(chunk);
5500     if (ConcurrentMarkSweepThread::should_yield() &&
5501         !foregroundGCIsActive() &&
5502         CMSYield) {
5503       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5504              "CMS thread should hold CMS token");
5505       assert_lock_strong(bitMapLock());
5506       bitMapLock()->unlock();
5507       ConcurrentMarkSweepThread::desynchronize(true);
5508       stopTimer();
5509       incrementYields();
5510 
5511       // See the comment in coordinator_yield()
5512       for (unsigned i = 0; i < CMSYieldSleepCount &&
5513                        ConcurrentMarkSweepThread::should_yield() &&
5514                        !CMSCollector::foregroundGCIsActive(); ++i) {
5515         os::sleep(Thread::current(), 1, false);
5516       }
5517 
5518       ConcurrentMarkSweepThread::synchronize(true);
5519       bitMapLock()->lock_without_safepoint_check();
5520       startTimer();
5521     }
5522     curAddr = chunk.end();
5523   }
5524   // A successful mostly concurrent collection has been done.
5525   // Because only the full (i.e., concurrent mode failure) collections
5526   // are being measured for gc overhead limits, clear the "near" flag
5527   // and count.
5528   size_policy()->reset_gc_overhead_limit_count();
5529   _collectorState = Idling;

5530 
5531   register_gc_end();
5532 }
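
The unlock/yield/relock protocol inside the clearing loop, reduced to a skeleton; std::mutex and the fixed bound of 10 stand in for bitMapLock, the CMS token hand-off, and CMSYieldSleepCount.

#include <mutex>
#include <thread>
#include <chrono>

// Skeleton of the yield protocol used in the loop above: release the
// bitmap lock (and, in HotSpot, the CMS token), sleep in 1ms steps
// while a yield is still requested, then reacquire and resume.
template <typename ShouldYield>
void sketch_yield(std::mutex& bitmap_lock, ShouldYield should_yield) {
  bitmap_lock.unlock();                  // let the requesting thread in
  for (int i = 0; i < 10 && should_yield(); ++i) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  bitmap_lock.lock();                    // resume where we left off
}
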
5533 
5534 // Same as above but for STW paths
5535 void CMSCollector::reset_stw() {
5536   // already have the lock
5537   assert(_collectorState == Resetting, "just checking");
5538   assert_lock_strong(bitMapLock());
5539   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5540   _markBitMap.clear_all();
5541   _collectorState = Idling;
5542   register_gc_end();
5543 }
5544 
5545 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5546   GCTraceCPUTime tcpu;
5547   TraceCollectorStats tcs(counters());
5548 
5549   switch (op) {




2725  private:
2726   CMSCollector *_collector;
2727   const char *_title;
2728   GCTraceConcTime(Info, gc) _trace_time;
2729 
2730  public:
2731   // Not MT-safe; so do not pass around these StackObj's
2732   // where they may be accessed by other threads.
2733   jlong wallclock_millis() {
2734     return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2735   }
2736 };
2737 
2738 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2739                                        const char *title) :
2740   _collector(collector), _title(title), _trace_time(title) {
2741 
2742   _collector->resetYields();
2743   _collector->resetTimer();
2744   _collector->startTimer();
2745   _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2746 }
2747 
2748 CMSPhaseAccounting::~CMSPhaseAccounting() {
2749   _collector->gc_timer_cm()->register_gc_concurrent_end();
2750   _collector->stopTimer();
2751   log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2752   log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2753 }
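
The shape of this revision's change, sketched with stand-in types: the register_gc_concurrent_start/end calls move from every phase's call site into the accounting object's constructor and destructor, so each phase is bracketed by a single stack-allocated scope.

// Stand-in types; the comments show the before/after shape of the change.
struct SketchTimer {
  void register_concurrent_start(const char* /*title*/) { /* start event */ }
  void register_concurrent_end()                        { /* end event   */ }
};

struct SketchPhaseScope {
  SketchTimer* _t;
  SketchPhaseScope(SketchTimer* t, const char* title) : _t(t) {
    _t->register_concurrent_start(title);  // was explicit at each call site
  }
  ~SketchPhaseScope() {
    _t->register_concurrent_end();         // now runs even on early returns
  }
};

// Before: callers paired register_gc_concurrent_start/end around the
// phase by hand. After: declaring the scope object is sufficient:
//   SketchPhaseScope pa(timer, "Concurrent Mark");
//   /* ... do the concurrent phase work ... */
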
2754 
2755 // CMS work
2756 
2757 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2758 class CMSParMarkTask : public AbstractGangTask {
2759  protected:
2760   CMSCollector*     _collector;
2761   uint              _n_workers;
2762   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2763       AbstractGangTask(name),
2764       _collector(collector),
2765       _n_workers(n_workers) {}
2766   // Work method in support of parallel rescan ... of young gen spaces
2767   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2768                              ContiguousSpace* space,
2769                              HeapWord** chunk_array, size_t chunk_top);


2893        "Was cleared in most recent final checkpoint phase"
2894        " or no bits are set in the gc_prologue before the start of the "
2895        "subsequent marking phase.");
2896 
2897   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2898 
2899   // Save the end of the used_region of the constituent generations
2900   // to be used to limit the extent of sweep in each generation.
2901   save_sweep_limits();
2902   verify_overflow_empty();
2903 }
2904 
2905 bool CMSCollector::markFromRoots() {
2906   // we might be tempted to assert that:
2907   // assert(!SafepointSynchronize::is_at_safepoint(),
2908   //        "inconsistent argument?");
2909   // However that wouldn't be right, because it's possible that
2910   // a safepoint is indeed in progress as a young generation
2911   // stop-the-world GC happens even as we mark in this generation.
2912   assert(_collectorState == Marking, "inconsistent state?");

2913   check_correct_thread_executing();
2914   verify_overflow_empty();
2915 
2916   // Weak ref discovery note: We may be discovering weak
2917   // refs in this generation concurrent (but interleaved) with
2918   // weak ref discovery by the young generation collector.
2919 
2920   CMSTokenSyncWithLocks ts(true, bitMapLock());
2921   GCTraceCPUTime tcpu;
2922   CMSPhaseAccounting pa(this, "Concurrent Mark");
2923   bool res = markFromRootsWork();
2924   if (res) {
2925     _collectorState = Precleaning;
2926   } else { // We failed and a foreground collection wants to take over
2927     assert(_foregroundGCIsActive, "internal state inconsistency");
2928     assert(_restart_addr == NULL, "foreground will restart from scratch");
2929     log_debug(gc)("bailing out to foreground collection");
2930   }
2931   verify_overflow_empty();

2932   return res;
2933 }
2934 
2935 bool CMSCollector::markFromRootsWork() {
2936   // iterate over marked bits in bit map, doing a full scan and mark
2937   // from these roots using the following algorithm:
2938   // . if oop is to the right of the current scan pointer,
2939   //   mark corresponding bit (we'll process it later)
2940   // . else (oop is to left of current scan pointer)
2941   //   push oop on marking stack
2942   // . drain the marking stack
2943 
2944   // Note that when we do a marking step we need to hold the
2945   // bit map lock -- recall that direct allocation (by mutators)
2946   // and promotion (by the young generation collector) is also
2947   // marking the bit map. [the so-called allocate live policy.]
2948   // Because the implementation of bit map marking is not
2949   // robust wrt simultaneous marking of bits in the same word,
2950   // we need to make sure that there is no interference
2951   // between such concurrent updates.


5278     assert(t->is_ConcurrentGC_thread(),
5279            "Should be CMS thread");
5280   } else {
5281     // We can be the CMS thread only if we are in a stop-world
5282     // phase of CMS collection.
5283     if (t->is_ConcurrentGC_thread()) {
5284       assert(_collectorState == InitialMarking ||
5285              _collectorState == FinalMarking,
5286              "Should be a stop-world phase");
5287       // The CMS thread should be holding the CMS_token.
5288       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5289              "Potential interference with concurrently "
5290              "executing VM thread");
5291     }
5292   }
5293 }
5294 #endif
5295 
5296 void CMSCollector::sweep() {
5297   assert(_collectorState == Sweeping, "just checking");

5298   check_correct_thread_executing();
5299   verify_work_stacks_empty();
5300   verify_overflow_empty();
5301   increment_sweep_count();
5302   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5303 
5304   _inter_sweep_timer.stop();
5305   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5306 
5307   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5308   _intra_sweep_timer.reset();
5309   _intra_sweep_timer.start();
5310   {
5311     GCTraceCPUTime tcpu;
5312     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5313     // First sweep the old gen
5314     {
5315       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5316                                bitMapLock());
5317       sweepWork(_cmsGen);


5358   // and out of the Sweeping state must be synchronously visible
5359   // globally to the mutators.
5360   // The transition into the Marking state happens with the world
5361   // stopped so the mutators will globally see it.  Sweeping is
5362   // done asynchronously by the background collector so the transition
5363   // from the Sweeping state to the Resizing state must be done
5364   // under the freelistLock (as is the check for whether to
5365   // allocate-live and whether to dirty the mod-union table).
5366   assert(_collectorState == Resizing, "Change of collector state to"
5367     " Resizing must be done under the freelistLocks (plural)");
5368 
5369   // Now that sweeping has been completed, we clear
5370   // the incremental_collection_failed flag,
5371   // thus inviting a younger gen collection to promote into
5372   // this generation. If such a promotion may still fail,
5373   // the flag will be set again when a young collection is
5374   // attempted.
5375   GenCollectedHeap* gch = GenCollectedHeap::heap();
5376   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5377   gch->update_full_collections_completed(_collection_count_start);

5378 }
5379 
5380 // FIX ME!!! Looks like this belongs in CFLSpace, with
5381 // CMSGen merely delegating to it.
5382 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5383   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5384   HeapWord*  minAddr        = _cmsSpace->bottom();
5385   HeapWord*  largestAddr    =
5386     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5387   if (largestAddr == NULL) {
5388     // The dictionary appears to be empty.  In this case
5389     // try to coalesce at the end of the heap.
5390     largestAddr = _cmsSpace->end();
5391   }
5392   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5393   size_t nearLargestOffset =
5394     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5395   log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5396                           p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5397   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);


5468   if (should_unload_classes()) {                // unloaded classes this cycle,
5469     _concurrent_cycles_since_last_unload = 0;   // ... reset count
5470   } else {                                      // did not unload classes,
5471     _concurrent_cycles_since_last_unload++;     // ... increment count
5472   }
5473 }
5474 
5475 // Reset CMS data structures (for now just the marking bit map)
5476 // preparatory for the next cycle.
5477 void CMSCollector::reset_concurrent() {
5478   CMSTokenSyncWithLocks ts(true, bitMapLock());
5479 
5480   // If the state is not "Resetting", the foreground thread
5481   // has done a collection and the resetting.
5482   if (_collectorState != Resetting) {
5483     assert(_collectorState == Idling, "The state should only change"
5484       " because the foreground collector has finished the collection");
5485     return;
5486   }
5487 
5488   {
5489     // Clear the mark bitmap (no grey objects to start with)
5490     // for the next cycle.
5491     GCTraceCPUTime tcpu;
5492     CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5493 
5494     HeapWord* curAddr = _markBitMap.startWord();
5495     while (curAddr < _markBitMap.endWord()) {
5496       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5497       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5498       _markBitMap.clear_large_range(chunk);
5499       if (ConcurrentMarkSweepThread::should_yield() &&
5500           !foregroundGCIsActive() &&
5501           CMSYield) {
5502         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5503                "CMS thread should hold CMS token");
5504         assert_lock_strong(bitMapLock());
5505         bitMapLock()->unlock();
5506         ConcurrentMarkSweepThread::desynchronize(true);
5507         stopTimer();
5508         incrementYields();
5509 
5510         // See the comment in coordinator_yield()
5511         for (unsigned i = 0; i < CMSYieldSleepCount &&
5512                          ConcurrentMarkSweepThread::should_yield() &&
5513                          !CMSCollector::foregroundGCIsActive(); ++i) {
5514           os::sleep(Thread::current(), 1, false);
5515         }
5516 
5517         ConcurrentMarkSweepThread::synchronize(true);
5518         bitMapLock()->lock_without_safepoint_check();
5519         startTimer();
5520       }
5521       curAddr = chunk.end();
5522     }
5523     // A successful mostly concurrent collection has been done.
5524     // Because only the full (i.e., concurrent mode failure) collections
5524   // are being measured for gc overhead limits, clear the "near" flag
5526     // and count.
5527     size_policy()->reset_gc_overhead_limit_count();
5528     _collectorState = Idling;
5529   }
5530 
5531   register_gc_end();
5532 }
5533 
5534 // Same as above but for STW paths
5535 void CMSCollector::reset_stw() {
5536   // already have the lock
5537   assert(_collectorState == Resetting, "just checking");
5538   assert_lock_strong(bitMapLock());
5539   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5540   _markBitMap.clear_all();
5541   _collectorState = Idling;
5542   register_gc_end();
5543 }
5544 
5545 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5546   GCTraceCPUTime tcpu;
5547   TraceCollectorStats tcs(counters());
5548 
5549   switch (op) {

