src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp


  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc/cms/cmsCollectorPolicy.hpp"
  31 #include "gc/cms/cmsOopClosures.inline.hpp"
  32 #include "gc/cms/compactibleFreeListSpace.hpp"
  33 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  34 #include "gc/cms/concurrentMarkSweepThread.hpp"
  35 #include "gc/cms/parNewGeneration.hpp"
  36 #include "gc/cms/vmCMSOperations.hpp"
  37 #include "gc/serial/genMarkSweep.hpp"
  38 #include "gc/serial/tenuredGeneration.hpp"
  39 #include "gc/shared/adaptiveSizePolicy.hpp"
  40 #include "gc/shared/cardGeneration.inline.hpp"
  41 #include "gc/shared/cardTableRS.hpp"
  42 #include "gc/shared/collectedHeap.inline.hpp"
  43 #include "gc/shared/collectorCounters.hpp"
  44 #include "gc/shared/collectorPolicy.hpp"

  45 #include "gc/shared/gcLocker.inline.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "gc/shared/gcTimer.hpp"
  48 #include "gc/shared/gcTrace.hpp"
  49 #include "gc/shared/gcTraceTime.hpp"
  50 #include "gc/shared/genCollectedHeap.hpp"
  51 #include "gc/shared/genOopClosures.inline.hpp"
  52 #include "gc/shared/isGCActiveMark.hpp"
  53 #include "gc/shared/referencePolicy.hpp"
  54 #include "gc/shared/strongRootsScope.hpp"
  55 #include "gc/shared/taskqueue.inline.hpp"
  56 #include "memory/allocation.hpp"
  57 #include "memory/iterator.inline.hpp"
  58 #include "memory/padded.hpp"
  59 #include "memory/resourceArea.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "runtime/atomic.inline.hpp"
  63 #include "runtime/globals_extension.hpp"
  64 #include "runtime/handles.inline.hpp"


1576 // after obtaining the free list locks for the
1577 // two generations.
1578 void CMSCollector::compute_new_size() {
1579   assert_locked_or_safepoint(Heap_lock);
1580   FreelistLocker z(this);
1581   MetaspaceGC::compute_new_size();
1582   _cmsGen->compute_new_size_free_list();
1583 }
1584 
1585 // A work method used by the foreground collector to do
1586 // a mark-sweep-compact.
1587 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1588   GenCollectedHeap* gch = GenCollectedHeap::heap();
1589 
1590   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1591   gc_timer->register_gc_start();
1592 
1593   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1594   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1595 
1596   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
1597 
1598   // Temporarily widen the span of the weak reference processing to
1599   // the entire heap.
1600   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1601   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1602   // Temporarily, clear the "is_alive_non_header" field of the
1603   // reference processor.
1604   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1605   // Temporarily make reference _processing_ single threaded (non-MT).
1606   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1607   // Temporarily make refs discovery atomic
1608   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1609   // Temporarily make reference _discovery_ single threaded (non-MT)
1610   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1611 
1612   ref_processor()->set_enqueuing_is_done(false);
1613   ref_processor()->enable_discovery();
1614   ref_processor()->setup_policy(clear_all_soft_refs);
1615   // If an asynchronous collection finishes, the _modUnionTable is
1616   // all clear.  If we are assuming the collection from an asynchronous
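
Editor's aside: the ReferenceProcessor*Mutator objects above are stack-scoped save-and-restore guards around the compaction. A minimal sketch of that idiom in plain C++ (hypothetical ScopedSetting class, not the actual HotSpot mutator types):

  // Record the current value on entry, install a temporary value for the
  // duration of the scope, and restore the original in the destructor.
  template <typename T>
  class ScopedSetting {
   public:
    ScopedSetting(T& slot, T temporary) : _slot(slot), _saved(slot) {
      _slot = temporary;
    }
    ~ScopedSetting() { _slot = _saved; }
   private:
    T& _slot;
    T  _saved;
  };
  // e.g. ScopedSetting<bool> mt_processing(mt_processing_flag, false);
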


2808 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2809   assert_locked_or_safepoint(Heap_lock);
2810 }
2811 
2812 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2813   assert_locked_or_safepoint(Heap_lock);
2814   assert_lock_strong(freelistLock());
2815   if (PrintGCDetails && Verbose) {
2816     warning("Shrinking of CMS not yet implemented");
2817   }
2818   return;
2819 }
2820 
2821 
2822 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2823 // phases.
2824 class CMSPhaseAccounting: public StackObj {
2825  public:
2826   CMSPhaseAccounting(CMSCollector *collector,
2827                      const char *phase,
2828                      const GCId gc_id,
2829                      bool print_cr = true);
2830   ~CMSPhaseAccounting();
2831 
2832  private:
2833   CMSCollector *_collector;
2834   const char *_phase;
2835   elapsedTimer _wallclock;
2836   bool _print_cr;
2837   const GCId _gc_id;
2838 
2839  public:
2840   // Not MT-safe; so do not pass around these StackObj's
2841   // where they may be accessed by other threads.
2842   jlong wallclock_millis() {
2843     assert(_wallclock.is_active(), "Wall clock should not stop");
2844     _wallclock.stop();  // to record time
2845     jlong ret = _wallclock.milliseconds();
2846     _wallclock.start(); // restart
2847     return ret;
2848   }
2849 };
2850 
2851 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2852                                        const char *phase,
2853                                        const GCId gc_id,
2854                                        bool print_cr) :
2855   _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
2856 
2857   if (PrintCMSStatistics != 0) {
2858     _collector->resetYields();
2859   }
2860   if (PrintGCDetails) {
2861     gclog_or_tty->gclog_stamp(_gc_id);
2862     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2863       _collector->cmsGen()->short_name(), _phase);
2864   }
2865   _collector->resetTimer();
2866   _wallclock.start();
2867   _collector->startTimer();
2868 }
2869 
2870 CMSPhaseAccounting::~CMSPhaseAccounting() {
2871   assert(_wallclock.is_active(), "Wall clock should not have stopped");
2872   _collector->stopTimer();
2873   _wallclock.stop();
2874   if (PrintGCDetails) {
2875     gclog_or_tty->gclog_stamp(_gc_id);
2876     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2877                  _collector->cmsGen()->short_name(),
2878                  _phase, _collector->timerValue(), _wallclock.seconds());
2879     if (_print_cr) {
2880       gclog_or_tty->cr();
2881     }
2882     if (PrintCMSStatistics != 0) {
2883       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2884                     _collector->yields());
2885     }
2886   }
2887 }
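
Editor's aside: CMSPhaseAccounting is a stack-scoped accounting object. Construction logs the phase start and starts the timers, destruction stops them and logs the concurrent and wall-clock times, and wallclock_millis() reads the running clock by briefly stopping and restarting it. A stand-alone sketch of the same pattern, assuming std::chrono rather than HotSpot's elapsedTimer:

  #include <chrono>
  #include <cstdio>

  class ScopedPhaseTimer {
   public:
    explicit ScopedPhaseTimer(const char* phase)
        : _phase(phase), _start(std::chrono::steady_clock::now()) {
      std::printf("[concurrent-%s-start]\n", _phase);             // phase begins
    }
    ~ScopedPhaseTimer() {
      double secs = std::chrono::duration<double>(
          std::chrono::steady_clock::now() - _start).count();
      std::printf("[concurrent-%s: %.3f secs]\n", _phase, secs);  // phase ends
    }
   private:
    const char* _phase;
    std::chrono::steady_clock::time_point _start;
  };
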
2888 
2889 // CMS work
2890 
2891 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2892 class CMSParMarkTask : public AbstractGangTask {
2893  protected:
2894   CMSCollector*     _collector;
2895   uint              _n_workers;


2934     checkpointRootsInitialWork();
2935     // enable ("weak") refs discovery
2936     rp->enable_discovery();
2937     _collectorState = Marking;
2938   }
2939 }
2940 
2941 void CMSCollector::checkpointRootsInitialWork() {
2942   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2943   assert(_collectorState == InitialMarking, "just checking");
2944 
2945   // Already have locks.
2946   assert_lock_strong(bitMapLock());
2947   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2948 
2949   // Setup the verification and class unloading state for this
2950   // CMS collection cycle.
2951   setup_cms_unloading_and_verification_state();
2952 
2953   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2954     PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
2955 
2956   // Reset all the PLAB chunk arrays if necessary.
2957   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2958     reset_survivor_plab_arrays();
2959   }
2960 
2961   ResourceMark rm;
2962   HandleMark  hm;
2963 
2964   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2965   GenCollectedHeap* gch = GenCollectedHeap::heap();
2966 
2967   verify_work_stacks_empty();
2968   verify_overflow_empty();
2969 
2970   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2971   // Update the saved marks which may affect the root scans.
2972   gch->save_marks();
2973 
2974   // weak reference processing has not started yet.


3037   verify_overflow_empty();
3038 }
3039 
3040 bool CMSCollector::markFromRoots() {
3041   // we might be tempted to assert that:
3042   // assert(!SafepointSynchronize::is_at_safepoint(),
3043   //        "inconsistent argument?");
3044   // However that wouldn't be right, because it's possible that
3045   // a safepoint is indeed in progress as a young generation
3046   // stop-the-world GC happens even as we mark in this generation.
3047   assert(_collectorState == Marking, "inconsistent state?");
3048   check_correct_thread_executing();
3049   verify_overflow_empty();
3050 
3051   // Weak ref discovery note: We may be discovering weak
3052   // refs in this generation concurrent (but interleaved) with
3053   // weak ref discovery by the young generation collector.
3054 
3055   CMSTokenSyncWithLocks ts(true, bitMapLock());
3056   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3057   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3058   bool res = markFromRootsWork();
3059   if (res) {
3060     _collectorState = Precleaning;
3061   } else { // We failed and a foreground collection wants to take over
3062     assert(_foregroundGCIsActive, "internal state inconsistency");
3063     assert(_restart_addr == NULL,  "foreground will restart from scratch");
3064     if (PrintGCDetails) {
3065       gclog_or_tty->print_cr("bailing out to foreground collection");
3066     }
3067   }
3068   verify_overflow_empty();
3069   return res;
3070 }
3071 
3072 bool CMSCollector::markFromRootsWork() {
3073   // iterate over marked bits in bit map, doing a full scan and mark
3074   // from these roots using the following algorithm:
3075   // . if oop is to the right of the current scan pointer,
3076   //   mark corresponding bit (we'll process it later)
3077   // . else (oop is to left of current scan pointer)


3734   check_correct_thread_executing();
3735   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3736   verify_work_stacks_empty();
3737   verify_overflow_empty();
3738   _abort_preclean = false;
3739   if (CMSPrecleaningEnabled) {
3740     if (!CMSEdenChunksRecordAlways) {
3741       _eden_chunk_index = 0;
3742     }
3743     size_t used = get_eden_used();
3744     size_t capacity = get_eden_capacity();
3745     // Don't start sampling unless we will get sufficiently
3746     // many samples.
3747     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3748                 * CMSScheduleRemarkEdenPenetration)) {
3749       _start_sampling = true;
3750     } else {
3751       _start_sampling = false;
3752     }
3753     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3754     CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3755     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3756   }
3757   CMSTokenSync x(true); // is cms thread
3758   if (CMSPrecleaningEnabled) {
3759     sample_eden();
3760     _collectorState = AbortablePreclean;
3761   } else {
3762     _collectorState = FinalMarking;
3763   }
3764   verify_work_stacks_empty();
3765   verify_overflow_empty();
3766 }
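
Editor's aside: the sampling guard in preclean() above compares eden occupancy against capacity / (CMSScheduleRemarkSamplingRatio * 100) * CMSScheduleRemarkEdenPenetration. A small worked example of that arithmetic, with purely hypothetical values (1000 MB eden, ratio 5, penetration 50):

  #include <cstddef>

  // 1000 / (5 * 100) * 50 = 100, i.e. for these example values sampling
  // starts only while eden holds less than about 10% of its capacity.
  static size_t sampling_threshold(size_t capacity_mb,
                                   size_t sampling_ratio,    // hypothetical: 5
                                   size_t eden_penetration)  // hypothetical: 50 (%)
  {
    return capacity_mb / (sampling_ratio * 100) * eden_penetration;
  }
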
3767 
3768 // Try and schedule the remark such that young gen
3769 // occupancy is CMSScheduleRemarkEdenPenetration %.
3770 void CMSCollector::abortable_preclean() {
3771   check_correct_thread_executing();
3772   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3773   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3774 
3775   // If Eden's current occupancy is below this threshold,
3776   // immediately schedule the remark; else preclean
3777   // past the next scavenge in an effort to
3778   // schedule the pause as described above. By choosing
3779   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3780   // we will never do an actual abortable preclean cycle.
3781   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3782     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3783     CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3784     // We need more smarts in the abortable preclean
3785     // loop below to deal with cases where allocation
3786     // in young gen is very very slow, and our precleaning
3787     // is running a losing race against a horde of
3788     // mutators intent on flooding us with CMS updates
3789     // (dirty cards).
3790     // One, admittedly dumb, strategy is to give up
3791     // after a certain number of abortable precleaning loops
3792     // or after a certain maximum time. We want to make
3793     // this smarter in the next iteration.
3794     // XXX FIX ME!!! YSR
3795     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3796     while (!(should_abort_preclean() ||
3797              ConcurrentMarkSweepThread::should_terminate())) {
3798       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3799       cumworkdone += workdone;
3800       loops++;
3801       // Voluntarily terminate abortable preclean phase if we have
3802       // been at it for too long.
3803       if ((CMSMaxAbortablePrecleanLoops != 0) &&


3908     // Note that we don't need to protect ourselves from
3909     // interference with mutators because they can't
3910     // manipulate the discovered reference lists nor affect
3911     // the computed reachability of the referents, the
3912     // only properties manipulated by the precleaning
3913     // of these reference lists.
3914     stopTimer();
3915     CMSTokenSyncWithLocks x(true /* is cms thread */,
3916                             bitMapLock());
3917     startTimer();
3918     sample_eden();
3919 
3920     // The following will yield to allow foreground
3921     // collection to proceed promptly. XXX YSR:
3922     // The code in this method may need further
3923     // tweaking for better performance and some restructuring
3924     // for cleaner interfaces.
3925     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3926     rp->preclean_discovered_references(
3927           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3928           gc_timer, _gc_tracer_cm->gc_id());
3929   }
3930 
3931   if (clean_survivor) {  // preclean the active survivor space(s)
3932     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3933                              &_markBitMap, &_modUnionTable,
3934                              &_markStack, true /* precleaning phase */);
3935     stopTimer();
3936     CMSTokenSyncWithLocks ts(true /* is cms thread */,
3937                              bitMapLock());
3938     startTimer();
3939     unsigned int before_count =
3940       GenCollectedHeap::heap()->total_collections();
3941     SurvivorSpacePrecleanClosure
3942       sss_cl(this, _span, &_markBitMap, &_markStack,
3943              &pam_cl, before_count, CMSYield);
3944     _young_gen->from()->object_iterate_careful(&sss_cl);
3945     _young_gen->to()->object_iterate_careful(&sss_cl);
3946   }
3947   MarkRefsIntoAndScanClosure
3948     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,


4244   // world is stopped at this checkpoint
4245   assert(SafepointSynchronize::is_at_safepoint(),
4246          "world should be stopped");
4247   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4248 
4249   verify_work_stacks_empty();
4250   verify_overflow_empty();
4251 
4252   if (PrintGCDetails) {
4253     gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4254                         _young_gen->used() / K,
4255                         _young_gen->capacity() / K);
4256   }
4257   {
4258     if (CMSScavengeBeforeRemark) {
4259       GenCollectedHeap* gch = GenCollectedHeap::heap();
4260       // Temporarily set flag to false, GCH->do_collection will
4261       // expect it to be false and set to true
4262       FlagSetting fl(gch->_is_gc_active, false);
4263       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4264         PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4265       gch->do_collection(true,                      // full (i.e. force, see below)
4266                          false,                     // !clear_all_soft_refs
4267                          0,                         // size
4268                          false,                     // is_tlab
4269                          GenCollectedHeap::YoungGen // type
4270         );
4271     }
4272     FreelistLocker x(this);
4273     MutexLockerEx y(bitMapLock(),
4274                     Mutex::_no_safepoint_check_flag);
4275     checkpointRootsFinalWork();
4276   }
4277   verify_work_stacks_empty();
4278   verify_overflow_empty();
4279 }
4280 
4281 void CMSCollector::checkpointRootsFinalWork() {
4282   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4283 
4284   assert(haveFreelistLocks(), "must have free list locks");
4285   assert_lock_strong(bitMapLock());
4286 
4287   ResourceMark rm;
4288   HandleMark   hm;
4289 
4290   GenCollectedHeap* gch = GenCollectedHeap::heap();
4291 
4292   if (should_unload_classes()) {
4293     CodeCache::gc_prologue();
4294   }
4295   assert(haveFreelistLocks(), "must have free list locks");
4296   assert_lock_strong(bitMapLock());
4297 
4298   // We might assume that we need not fill TLAB's when
4299   // CMSScavengeBeforeRemark is set, because we may have just done
4300   // a scavenge which would have filled all TLAB's -- and besides
4301   // Eden would be empty. This however may not always be the case --
4302   // for instance although we asked for a scavenge, it may not have


4312   gch->save_marks();
4313 
4314   if (CMSPrintEdenSurvivorChunks) {
4315     print_eden_and_survivor_chunk_arrays();
4316   }
4317 
4318   {
4319     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4320 
4321     // Note on the role of the mod union table:
4322     // Since the marker in "markFromRoots" marks concurrently with
4323     // mutators, it is possible for some reachable objects not to have been
4324     // scanned. For instance, an only reference to an object A was
4325     // placed in object B after the marker scanned B. Unless B is rescanned,
4326     // A would be collected. Such updates to references in marked objects
4327     // are detected via the mod union table which is the set of all cards
4328     // dirtied since the first checkpoint in this GC cycle and prior to
4329     // the most recent young generation GC, minus those cleaned up by the
4330     // concurrent precleaning.
4331     if (CMSParallelRemarkEnabled) {
4332       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
4333       do_remark_parallel();
4334     } else {
4335       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4336                   _gc_timer_cm, _gc_tracer_cm->gc_id());
4337       do_remark_non_parallel();
4338     }
4339   }
4340   verify_work_stacks_empty();
4341   verify_overflow_empty();
4342 
4343   {
4344     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4345     refProcessingWork();
4346   }
4347   verify_work_stacks_empty();
4348   verify_overflow_empty();
4349 
4350   if (should_unload_classes()) {
4351     CodeCache::gc_epilogue();
4352   }
4353   JvmtiExport::gc_epilogue();
4354 
4355   // If we encountered any (marking stack / work queue) overflow
4356   // events during the current CMS cycle, take appropriate
4357   // remedial measures, where possible, so as to try and avoid
4358   // recurrence of that condition.
4359   assert(_markStack.isEmpty(), "No grey objects");
4360   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4361                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4362   if (ser_ovflw > 0) {
4363     if (PrintCMSStatistics != 0) {
4364       gclog_or_tty->print_cr("Marking stack overflow (benign) "


5099   // as a result of work_q overflow
5100   restore_preserved_marks_if_any();
5101 }
5102 
5103 // Non-parallel version of remark
5104 void CMSCollector::do_remark_non_parallel() {
5105   ResourceMark rm;
5106   HandleMark   hm;
5107   GenCollectedHeap* gch = GenCollectedHeap::heap();
5108   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5109 
5110   MarkRefsIntoAndScanClosure
5111     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5112              &_markStack, this,
5113              false /* should_yield */, false /* not precleaning */);
5114   MarkFromDirtyCardsClosure
5115     markFromDirtyCardsClosure(this, _span,
5116                               NULL,  // space is set further below
5117                               &_markBitMap, &_markStack, &mrias_cl);
5118   {
5119     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5120     // Iterate over the dirty cards, setting the corresponding bits in the
5121     // mod union table.
5122     {
5123       ModUnionClosure modUnionClosure(&_modUnionTable);
5124       _ct->ct_bs()->dirty_card_iterate(
5125                       _cmsGen->used_region(),
5126                       &modUnionClosure);
5127     }
5128     // Having transferred these marks into the modUnionTable, we just need
5129     // to rescan the marked objects on the dirty cards in the modUnionTable.
5130     // The initial marking may have been done during an asynchronous
5131     // collection so there may be dirty bits in the mod-union table.
5132     const int alignment =
5133       CardTableModRefBS::card_size * BitsPerWord;
5134     {
5135       // ... First handle dirty cards in CMS gen
5136       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5137       MemRegion ur = _cmsGen->used_region();
5138       HeapWord* lb = ur.start();
5139       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5140       MemRegion cms_span(lb, ub);
5141       _modUnionTable.dirty_range_iterate_clear(cms_span,
5142                                                &markFromDirtyCardsClosure);
5143       verify_work_stacks_empty();
5144       if (PrintCMSStatistics != 0) {
5145         gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5146           markFromDirtyCardsClosure.num_dirty_cards());
5147       }
5148     }
5149   }
5150   if (VerifyDuringGC &&
5151       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5152     HandleMark hm;  // Discard invalid handles created during verification
5153     Universe::verify();
5154   }
5155   {
5156     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5157 
5158     verify_work_stacks_empty();
5159 
5160     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5161     StrongRootsScope srs(1);
5162 
5163     gch->gen_process_roots(&srs,
5164                            GenCollectedHeap::OldGen,
5165                            true,  // young gen as roots
5166                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5167                            should_unload_classes(),
5168                            &mrias_cl,
5169                            NULL,
5170                            NULL); // The dirty klasses will be handled below
5171 
5172     assert(should_unload_classes()
5173            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5174            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5175   }
5176 
5177   {
5178     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5179 
5180     verify_work_stacks_empty();
5181 
5182     // Scan all class loader data objects that might have been introduced
5183     // during concurrent marking.
5184     ResourceMark rm;
5185     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5186     for (int i = 0; i < array->length(); i++) {
5187       mrias_cl.do_cld_nv(array->at(i));
5188     }
5189 
5190     // We don't need to keep track of new CLDs anymore.
5191     ClassLoaderDataGraph::remember_new_clds(false);
5192 
5193     verify_work_stacks_empty();
5194   }
5195 
5196   {
5197     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5198 
5199     verify_work_stacks_empty();
5200 
5201     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5202     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5203 
5204     verify_work_stacks_empty();
5205   }
5206 
5207   // We might have added oops to ClassLoaderData::_handles during the
5208   // concurrent marking phase. These oops point to newly allocated objects
5209   // that are guaranteed to be kept alive. Either by the direct allocation
5210   // code, or when the young collector processes the roots. Hence,
5211   // we don't have to revisit the _handles block during the remark phase.
5212 
5213   verify_work_stacks_empty();
5214   // Restore evacuated mark words, if any, used for overflow list links
5215   if (!CMSOverflowEarlyRestoration) {
5216     restore_preserved_marks_if_any();
5217   }
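
Editor's aside: earlier in this hunk the cms_span upper bound is rounded up to a multiple of CardTableModRefBS::card_size * BitsPerWord so that whole card-table words are covered when clearing the mod union table. A hedged sketch of that rounding, using a hypothetical helper rather than HotSpot's round_to:

  #include <cstdint>

  // Assumes alignment is a power of two, as card_size * BitsPerWord is.
  static uintptr_t round_up_to(uintptr_t value, uintptr_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
  }
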


5386   workers->run_task(&enq_task);
5387 }
5388 
5389 void CMSCollector::refProcessingWork() {
5390   ResourceMark rm;
5391   HandleMark   hm;
5392 
5393   ReferenceProcessor* rp = ref_processor();
5394   assert(rp->span().equals(_span), "Spans should be equal");
5395   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5396   // Process weak references.
5397   rp->setup_policy(false);
5398   verify_work_stacks_empty();
5399 
5400   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5401                                           &_markStack, false /* !preclean */);
5402   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5403                                 _span, &_markBitMap, &_markStack,
5404                                 &cmsKeepAliveClosure, false /* !preclean */);
5405   {
5406     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5407 
5408     ReferenceProcessorStats stats;
5409     if (rp->processing_is_mt()) {
5410       // Set the degree of MT here.  If the discovery is done MT, there
5411       // may have been a different number of threads doing the discovery
5412       // and a different number of discovered lists may have Ref objects.
5413       // That is OK as long as the Reference lists are balanced (see
5414       // balance_all_queues() and balance_queues()).
5415       GenCollectedHeap* gch = GenCollectedHeap::heap();
5416       uint active_workers = ParallelGCThreads;
5417       WorkGang* workers = gch->workers();
5418       if (workers != NULL) {
5419         active_workers = workers->active_workers();
5420         // The expectation is that active_workers will have already
5421         // been set to a reasonable value.  If it has not been set,
5422         // investigate.
5423         assert(active_workers > 0, "Should have been set during scavenge");
5424       }
5425       rp->set_active_mt_degree(active_workers);
5426       CMSRefProcTaskExecutor task_executor(*this);
5427       stats = rp->process_discovered_references(&_is_alive_closure,
5428                                         &cmsKeepAliveClosure,
5429                                         &cmsDrainMarkingStackClosure,
5430                                         &task_executor,
5431                                         _gc_timer_cm,
5432                                         _gc_tracer_cm->gc_id());
5433     } else {
5434       stats = rp->process_discovered_references(&_is_alive_closure,
5435                                         &cmsKeepAliveClosure,
5436                                         &cmsDrainMarkingStackClosure,
5437                                         NULL,
5438                                         _gc_timer_cm,
5439                                         _gc_tracer_cm->gc_id());
5440     }
5441     _gc_tracer_cm->report_gc_reference_stats(stats);
5442 
5443   }
5444 
5445   // This is the point where the entire marking should have completed.
5446   verify_work_stacks_empty();
5447 
5448   if (should_unload_classes()) {
5449     {
5450       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5451 
5452       // Unload classes and purge the SystemDictionary.
5453       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5454 
5455       // Unload nmethods.
5456       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5457 
5458       // Prune dead klasses from subklass/sibling/implementor lists.
5459       Klass::clean_weak_klass_links(&_is_alive_closure);
5460     }
5461 
5462     {
5463       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5464       // Clean up unreferenced symbols in symbol table.
5465       SymbolTable::unlink();
5466     }
5467 
5468     {
5469       GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5470       // Delete entries for dead interned strings.
5471       StringTable::unlink(&_is_alive_closure);
5472     }
5473   }
5474 
5475 
5476   // Restore any preserved marks as a result of mark stack or
5477   // work queue overflow
5478   restore_preserved_marks_if_any();  // done single-threaded for now
5479 
5480   rp->set_enqueuing_is_done(true);
5481   if (rp->processing_is_mt()) {
5482     rp->balance_all_queues();
5483     CMSRefProcTaskExecutor task_executor(*this);
5484     rp->enqueue_discovered_references(&task_executor);
5485   } else {
5486     rp->enqueue_discovered_references(NULL);
5487   }
5488   rp->verify_no_references_recorded();
5489   assert(!rp->discovery_enabled(), "should have been disabled");


5517   }
5518 }
5519 #endif
5520 
5521 void CMSCollector::sweep() {
5522   assert(_collectorState == Sweeping, "just checking");
5523   check_correct_thread_executing();
5524   verify_work_stacks_empty();
5525   verify_overflow_empty();
5526   increment_sweep_count();
5527   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5528 
5529   _inter_sweep_timer.stop();
5530   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5531 
5532   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5533   _intra_sweep_timer.reset();
5534   _intra_sweep_timer.start();
5535   {
5536     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5537     CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5538     // First sweep the old gen
5539     {
5540       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5541                                bitMapLock());
5542       sweepWork(_cmsGen);
5543     }
5544 
5545     // Update Universe::_heap_*_at_gc figures.
5546     // We need all the free list locks to make the abstract state
5547     // transition from Sweeping to Resetting. See detailed note
5548     // further below.
5549     {
5550       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5551       // Update heap occupancy information which is used as
5552       // input to soft ref clearing policy at the next gc.
5553       Universe::update_heap_info_at_gc();
5554       _collectorState = Resizing;
5555     }
5556   }
5557   verify_work_stacks_empty();
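
Editor's aside: the inter-sweep timer sample above feeds an averaged estimate of the time between sweeps. A rough sketch of one way such an estimate can be kept, assuming a plain exponentially weighted average rather than HotSpot's AdaptivePaddedAverage:

  class ExpAverage {
   public:
    explicit ExpAverage(double weight) : _weight(weight), _avg(0.0) {}
    void sample(double v) { _avg = _weight * v + (1.0 - _weight) * _avg; }
    double average() const { return _avg; }
   private:
    double _weight;  // fraction of the newest sample kept
    double _avg;
  };
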


5702   }
5703 }
5704 
5705 // Reset CMS data structures (for now just the marking bit map)
5706 // preparatory for the next cycle.
5707 void CMSCollector::reset(bool concurrent) {
5708   if (concurrent) {
5709     CMSTokenSyncWithLocks ts(true, bitMapLock());
5710 
5711     // If the state is not "Resetting", the foreground  thread
5712     // has done a collection and the resetting.
5713     if (_collectorState != Resetting) {
5714       assert(_collectorState == Idling, "The state should only change"
5715         " because the foreground collector has finished the collection");
5716       return;
5717     }
5718 
5719     // Clear the mark bitmap (no grey objects to start with)
5720     // for the next cycle.
5721     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5722     CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5723 
5724     HeapWord* curAddr = _markBitMap.startWord();
5725     while (curAddr < _markBitMap.endWord()) {
5726       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5727       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5728       _markBitMap.clear_large_range(chunk);
5729       if (ConcurrentMarkSweepThread::should_yield() &&
5730           !foregroundGCIsActive() &&
5731           CMSYield) {
5732         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5733                "CMS thread should hold CMS token");
5734         assert_lock_strong(bitMapLock());
5735         bitMapLock()->unlock();
5736         ConcurrentMarkSweepThread::desynchronize(true);
5737         stopTimer();
5738         if (PrintCMSStatistics != 0) {
5739           incrementYields();
5740         }
5741 
5742         // See the comment in coordinator_yield()
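
Editor's aside: reset(true) above clears the mark bitmap in CMSBitMapYieldQuantum-sized chunks so the loop can yield to a waiting foreground collection between chunks. A minimal sketch of that chunked-clear-with-yield structure (hypothetical callbacks, not the HotSpot token/locking protocol):

  #include <algorithm>
  #include <cstddef>
  #include <cstring>

  static void clear_in_chunks(unsigned char* base, size_t total_bytes,
                              size_t chunk_bytes,
                              bool (*should_yield)(), void (*do_yield)()) {
    for (size_t cur = 0; cur < total_bytes; ) {
      size_t len = std::min(chunk_bytes, total_bytes - cur);
      std::memset(base + cur, 0, len);   // clear one chunk
      cur += len;
      if (should_yield()) {
        do_yield();                      // let a waiting collection run
      }
    }
  }
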


5754     }
5755     // A successful mostly concurrent collection has been done.
5756     // Because only the full (i.e., concurrent mode failure) collections
5757     // are being measured for gc overhead limits, clean the "near" flag
5758     // and count.
5759     size_policy()->reset_gc_overhead_limit_count();
5760     _collectorState = Idling;
5761   } else {
5762     // already have the lock
5763     assert(_collectorState == Resetting, "just checking");
5764     assert_lock_strong(bitMapLock());
5765     _markBitMap.clear_all();
5766     _collectorState = Idling;
5767   }
5768 
5769   register_gc_end();
5770 }
5771 
5772 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5773   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5774   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
5775   TraceCollectorStats tcs(counters());
5776 
5777   switch (op) {
5778     case CMS_op_checkpointRootsInitial: {
5779       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5780       checkpointRootsInitial();
5781       if (PrintGC) {
5782         _cmsGen->printOccupancy("initial-mark");
5783       }
5784       break;
5785     }
5786     case CMS_op_checkpointRootsFinal: {
5787       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5788       checkpointRootsFinal();
5789       if (PrintGC) {
5790         _cmsGen->printOccupancy("remark");
5791       }
5792       break;
5793     }
5794     default:




  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc/cms/cmsCollectorPolicy.hpp"
  31 #include "gc/cms/cmsOopClosures.inline.hpp"
  32 #include "gc/cms/compactibleFreeListSpace.hpp"
  33 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  34 #include "gc/cms/concurrentMarkSweepThread.hpp"
  35 #include "gc/cms/parNewGeneration.hpp"
  36 #include "gc/cms/vmCMSOperations.hpp"
  37 #include "gc/serial/genMarkSweep.hpp"
  38 #include "gc/serial/tenuredGeneration.hpp"
  39 #include "gc/shared/adaptiveSizePolicy.hpp"
  40 #include "gc/shared/cardGeneration.inline.hpp"
  41 #include "gc/shared/cardTableRS.hpp"
  42 #include "gc/shared/collectedHeap.inline.hpp"
  43 #include "gc/shared/collectorCounters.hpp"
  44 #include "gc/shared/collectorPolicy.hpp"
  45 #include "gc/shared/gcId.hpp"
  46 #include "gc/shared/gcLocker.inline.hpp"
  47 #include "gc/shared/gcPolicyCounters.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTrace.hpp"
  50 #include "gc/shared/gcTraceTime.hpp"
  51 #include "gc/shared/genCollectedHeap.hpp"
  52 #include "gc/shared/genOopClosures.inline.hpp"
  53 #include "gc/shared/isGCActiveMark.hpp"
  54 #include "gc/shared/referencePolicy.hpp"
  55 #include "gc/shared/strongRootsScope.hpp"
  56 #include "gc/shared/taskqueue.inline.hpp"
  57 #include "memory/allocation.hpp"
  58 #include "memory/iterator.inline.hpp"
  59 #include "memory/padded.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "prims/jvmtiExport.hpp"
  63 #include "runtime/atomic.inline.hpp"
  64 #include "runtime/globals_extension.hpp"
  65 #include "runtime/handles.inline.hpp"


1577 // after obtaining the free list locks for the
1578 // two generations.
1579 void CMSCollector::compute_new_size() {
1580   assert_locked_or_safepoint(Heap_lock);
1581   FreelistLocker z(this);
1582   MetaspaceGC::compute_new_size();
1583   _cmsGen->compute_new_size_free_list();
1584 }
1585 
1586 // A work method used by the foreground collector to do
1587 // a mark-sweep-compact.
1588 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1589   GenCollectedHeap* gch = GenCollectedHeap::heap();
1590 
1591   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1592   gc_timer->register_gc_start();
1593 
1594   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1595   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1596 
1597   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
1598 
1599   // Temporarily widen the span of the weak reference processing to
1600   // the entire heap.
1601   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1602   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1603   // Temporarily, clear the "is_alive_non_header" field of the
1604   // reference processor.
1605   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1606   // Temporarily make reference _processing_ single threaded (non-MT).
1607   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1608   // Temporarily make refs discovery atomic
1609   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1610   // Temporarily make reference _discovery_ single threaded (non-MT)
1611   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1612 
1613   ref_processor()->set_enqueuing_is_done(false);
1614   ref_processor()->enable_discovery();
1615   ref_processor()->setup_policy(clear_all_soft_refs);
1616   // If an asynchronous collection finishes, the _modUnionTable is
1617   // all clear.  If we are assuming the collection from an asynchronous


2809 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2810   assert_locked_or_safepoint(Heap_lock);
2811 }
2812 
2813 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2814   assert_locked_or_safepoint(Heap_lock);
2815   assert_lock_strong(freelistLock());
2816   if (PrintGCDetails && Verbose) {
2817     warning("Shrinking of CMS not yet implemented");
2818   }
2819   return;
2820 }
2821 
2822 
2823 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2824 // phases.
2825 class CMSPhaseAccounting: public StackObj {
2826  public:
2827   CMSPhaseAccounting(CMSCollector *collector,
2828                      const char *phase,

2829                      bool print_cr = true);
2830   ~CMSPhaseAccounting();
2831 
2832  private:
2833   CMSCollector *_collector;
2834   const char *_phase;
2835   elapsedTimer _wallclock;
2836   bool _print_cr;

2837 
2838  public:
2839   // Not MT-safe; so do not pass around these StackObj's
2840   // where they may be accessed by other threads.
2841   jlong wallclock_millis() {
2842     assert(_wallclock.is_active(), "Wall clock should not stop");
2843     _wallclock.stop();  // to record time
2844     jlong ret = _wallclock.milliseconds();
2845     _wallclock.start(); // restart
2846     return ret;
2847   }
2848 };
2849 
2850 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2851                                        const char *phase,

2852                                        bool print_cr) :
2853   _collector(collector), _phase(phase), _print_cr(print_cr) {
2854 
2855   if (PrintCMSStatistics != 0) {
2856     _collector->resetYields();
2857   }
2858   if (PrintGCDetails) {
2859     gclog_or_tty->gclog_stamp();
2860     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2861       _collector->cmsGen()->short_name(), _phase);
2862   }
2863   _collector->resetTimer();
2864   _wallclock.start();
2865   _collector->startTimer();
2866 }
2867 
2868 CMSPhaseAccounting::~CMSPhaseAccounting() {
2869   assert(_wallclock.is_active(), "Wall clock should not have stopped");
2870   _collector->stopTimer();
2871   _wallclock.stop();
2872   if (PrintGCDetails) {
2873     gclog_or_tty->gclog_stamp();
2874     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2875                  _collector->cmsGen()->short_name(),
2876                  _phase, _collector->timerValue(), _wallclock.seconds());
2877     if (_print_cr) {
2878       gclog_or_tty->cr();
2879     }
2880     if (PrintCMSStatistics != 0) {
2881       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2882                     _collector->yields());
2883     }
2884   }
2885 }
2886 
2887 // CMS work
2888 
2889 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2890 class CMSParMarkTask : public AbstractGangTask {
2891  protected:
2892   CMSCollector*     _collector;
2893   uint              _n_workers;


2932     checkpointRootsInitialWork();
2933     // enable ("weak") refs discovery
2934     rp->enable_discovery();
2935     _collectorState = Marking;
2936   }
2937 }
2938 
2939 void CMSCollector::checkpointRootsInitialWork() {
2940   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2941   assert(_collectorState == InitialMarking, "just checking");
2942 
2943   // Already have locks.
2944   assert_lock_strong(bitMapLock());
2945   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2946 
2947   // Setup the verification and class unloading state for this
2948   // CMS collection cycle.
2949   setup_cms_unloading_and_verification_state();
2950 
2951   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2952     PrintGCDetails && Verbose, true, _gc_timer_cm);)
2953 
2954   // Reset all the PLAB chunk arrays if necessary.
2955   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2956     reset_survivor_plab_arrays();
2957   }
2958 
2959   ResourceMark rm;
2960   HandleMark  hm;
2961 
2962   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2963   GenCollectedHeap* gch = GenCollectedHeap::heap();
2964 
2965   verify_work_stacks_empty();
2966   verify_overflow_empty();
2967 
2968   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2969   // Update the saved marks which may affect the root scans.
2970   gch->save_marks();
2971 
2972   // weak reference processing has not started yet.


3035   verify_overflow_empty();
3036 }
3037 
3038 bool CMSCollector::markFromRoots() {
3039   // we might be tempted to assert that:
3040   // assert(!SafepointSynchronize::is_at_safepoint(),
3041   //        "inconsistent argument?");
3042   // However that wouldn't be right, because it's possible that
3043   // a safepoint is indeed in progress as a young generation
3044   // stop-the-world GC happens even as we mark in this generation.
3045   assert(_collectorState == Marking, "inconsistent state?");
3046   check_correct_thread_executing();
3047   verify_overflow_empty();
3048 
3049   // Weak ref discovery note: We may be discovering weak
3050   // refs in this generation concurrent (but interleaved) with
3051   // weak ref discovery by the young generation collector.
3052 
3053   CMSTokenSyncWithLocks ts(true, bitMapLock());
3054   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3055   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3056   bool res = markFromRootsWork();
3057   if (res) {
3058     _collectorState = Precleaning;
3059   } else { // We failed and a foreground collection wants to take over
3060     assert(_foregroundGCIsActive, "internal state inconsistency");
3061     assert(_restart_addr == NULL,  "foreground will restart from scratch");
3062     if (PrintGCDetails) {
3063       gclog_or_tty->print_cr("bailing out to foreground collection");
3064     }
3065   }
3066   verify_overflow_empty();
3067   return res;
3068 }
3069 
3070 bool CMSCollector::markFromRootsWork() {
3071   // iterate over marked bits in bit map, doing a full scan and mark
3072   // from these roots using the following algorithm:
3073   // . if oop is to the right of the current scan pointer,
3074   //   mark corresponding bit (we'll process it later)
3075   // . else (oop is to left of current scan pointer)


3732   check_correct_thread_executing();
3733   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3734   verify_work_stacks_empty();
3735   verify_overflow_empty();
3736   _abort_preclean = false;
3737   if (CMSPrecleaningEnabled) {
3738     if (!CMSEdenChunksRecordAlways) {
3739       _eden_chunk_index = 0;
3740     }
3741     size_t used = get_eden_used();
3742     size_t capacity = get_eden_capacity();
3743     // Don't start sampling unless we will get sufficiently
3744     // many samples.
3745     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3746                 * CMSScheduleRemarkEdenPenetration)) {
3747       _start_sampling = true;
3748     } else {
3749       _start_sampling = false;
3750     }
3751     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3752     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
3753     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3754   }
3755   CMSTokenSync x(true); // is cms thread
3756   if (CMSPrecleaningEnabled) {
3757     sample_eden();
3758     _collectorState = AbortablePreclean;
3759   } else {
3760     _collectorState = FinalMarking;
3761   }
3762   verify_work_stacks_empty();
3763   verify_overflow_empty();
3764 }
3765 
3766 // Try and schedule the remark such that young gen
3767 // occupancy is CMSScheduleRemarkEdenPenetration %.
3768 void CMSCollector::abortable_preclean() {
3769   check_correct_thread_executing();
3770   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3771   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3772 
3773   // If Eden's current occupancy is below this threshold,
3774   // immediately schedule the remark; else preclean
3775   // past the next scavenge in an effort to
3776   // schedule the pause as described above. By choosing
3777   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3778   // we will never do an actual abortable preclean cycle.
3779   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3780     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3781     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
3782     // We need more smarts in the abortable preclean
3783     // loop below to deal with cases where allocation
3784     // in young gen is very very slow, and our precleaning
3785     // is running a losing race against a horde of
3786     // mutators intent on flooding us with CMS updates
3787     // (dirty cards).
3788     // One, admittedly dumb, strategy is to give up
3789     // after a certain number of abortable precleaning loops
3790     // or after a certain maximum time. We want to make
3791     // this smarter in the next iteration.
3792     // XXX FIX ME!!! YSR
3793     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3794     while (!(should_abort_preclean() ||
3795              ConcurrentMarkSweepThread::should_terminate())) {
3796       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3797       cumworkdone += workdone;
3798       loops++;
3799       // Voluntarily terminate abortable preclean phase if we have
3800       // been at it for too long.
3801       if ((CMSMaxAbortablePrecleanLoops != 0) &&


3906     // Note that we don't need to protect ourselves from
3907     // interference with mutators because they can't
3908     // manipulate the discovered reference lists nor affect
3909     // the computed reachability of the referents, the
3910     // only properties manipulated by the precleaning
3911     // of these reference lists.
3912     stopTimer();
3913     CMSTokenSyncWithLocks x(true /* is cms thread */,
3914                             bitMapLock());
3915     startTimer();
3916     sample_eden();
3917 
3918     // The following will yield to allow foreground
3919     // collection to proceed promptly. XXX YSR:
3920     // The code in this method may need further
3921     // tweaking for better performance and some restructuring
3922     // for cleaner interfaces.
3923     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3924     rp->preclean_discovered_references(
3925           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3926           gc_timer);
3927   }
3928 
3929   if (clean_survivor) {  // preclean the active survivor space(s)
3930     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3931                              &_markBitMap, &_modUnionTable,
3932                              &_markStack, true /* precleaning phase */);
3933     stopTimer();
3934     CMSTokenSyncWithLocks ts(true /* is cms thread */,
3935                              bitMapLock());
3936     startTimer();
3937     unsigned int before_count =
3938       GenCollectedHeap::heap()->total_collections();
3939     SurvivorSpacePrecleanClosure
3940       sss_cl(this, _span, &_markBitMap, &_markStack,
3941              &pam_cl, before_count, CMSYield);
3942     _young_gen->from()->object_iterate_careful(&sss_cl);
3943     _young_gen->to()->object_iterate_careful(&sss_cl);
3944   }
3945   MarkRefsIntoAndScanClosure
3946     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,


4242   // world is stopped at this checkpoint
4243   assert(SafepointSynchronize::is_at_safepoint(),
4244          "world should be stopped");
4245   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4246 
4247   verify_work_stacks_empty();
4248   verify_overflow_empty();
4249 
4250   if (PrintGCDetails) {
4251     gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4252                         _young_gen->used() / K,
4253                         _young_gen->capacity() / K);
4254   }
4255   {
4256     if (CMSScavengeBeforeRemark) {
4257       GenCollectedHeap* gch = GenCollectedHeap::heap();
4258       // Temporarily set flag to false, GCH->do_collection will
4259       // expect it to be false and set to true
4260       FlagSetting fl(gch->_is_gc_active, false);
4261       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4262         PrintGCDetails && Verbose, true, _gc_timer_cm);)
4263       gch->do_collection(true,                      // full (i.e. force, see below)
4264                          false,                     // !clear_all_soft_refs
4265                          0,                         // size
4266                          false,                     // is_tlab
4267                          GenCollectedHeap::YoungGen // type
4268         );
4269     }
4270     FreelistLocker x(this);
4271     MutexLockerEx y(bitMapLock(),
4272                     Mutex::_no_safepoint_check_flag);
4273     checkpointRootsFinalWork();
4274   }
4275   verify_work_stacks_empty();
4276   verify_overflow_empty();
4277 }
4278 
4279 void CMSCollector::checkpointRootsFinalWork() {
4280   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
4281 
4282   assert(haveFreelistLocks(), "must have free list locks");
4283   assert_lock_strong(bitMapLock());
4284 
4285   ResourceMark rm;
4286   HandleMark   hm;
4287 
4288   GenCollectedHeap* gch = GenCollectedHeap::heap();
4289 
4290   if (should_unload_classes()) {
4291     CodeCache::gc_prologue();
4292   }
4293   assert(haveFreelistLocks(), "must have free list locks");
4294   assert_lock_strong(bitMapLock());
4295 
4296   // We might assume that we need not fill TLAB's when
4297   // CMSScavengeBeforeRemark is set, because we may have just done
4298   // a scavenge which would have filled all TLAB's -- and besides
4299   // Eden would be empty. This however may not always be the case --
4300   // for instance although we asked for a scavenge, it may not have


4310   gch->save_marks();
4311 
4312   if (CMSPrintEdenSurvivorChunks) {
4313     print_eden_and_survivor_chunk_arrays();
4314   }
4315 
4316   {
4317     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4318 
4319     // Note on the role of the mod union table:
4320     // Since the marker in "markFromRoots" marks concurrently with
4321     // mutators, it is possible for some reachable objects not to have been
4322     // scanned. For instance, an only reference to an object A was
4323     // placed in object B after the marker scanned B. Unless B is rescanned,
4324     // A would be collected. Such updates to references in marked objects
4325     // are detected via the mod union table which is the set of all cards
4326     // dirtied since the first checkpoint in this GC cycle and prior to
4327     // the most recent young generation GC, minus those cleaned up by the
4328     // concurrent precleaning.
4329     if (CMSParallelRemarkEnabled) {
4330       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
4331       do_remark_parallel();
4332     } else {
4333       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);

4334       do_remark_non_parallel();
4335     }
4336   }
4337   verify_work_stacks_empty();
4338   verify_overflow_empty();
4339 
4340   {
4341     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
4342     refProcessingWork();
4343   }
4344   verify_work_stacks_empty();
4345   verify_overflow_empty();
4346 
4347   if (should_unload_classes()) {
4348     CodeCache::gc_epilogue();
4349   }
4350   JvmtiExport::gc_epilogue();
4351 
4352   // If we encountered any (marking stack / work queue) overflow
4353   // events during the current CMS cycle, take appropriate
4354   // remedial measures, where possible, so as to try and avoid
4355   // recurrence of that condition.
4356   assert(_markStack.isEmpty(), "No grey objects");
4357   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4358                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4359   if (ser_ovflw > 0) {
4360     if (PrintCMSStatistics != 0) {
4361       gclog_or_tty->print_cr("Marking stack overflow (benign) "


5096   // as a result of work_q overflow
5097   restore_preserved_marks_if_any();
5098 }
5099 
5100 // Non-parallel version of remark
5101 void CMSCollector::do_remark_non_parallel() {
5102   ResourceMark rm;
5103   HandleMark   hm;
5104   GenCollectedHeap* gch = GenCollectedHeap::heap();
5105   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5106 
5107   MarkRefsIntoAndScanClosure
5108     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5109              &_markStack, this,
5110              false /* should_yield */, false /* not precleaning */);
5111   MarkFromDirtyCardsClosure
5112     markFromDirtyCardsClosure(this, _span,
5113                               NULL,  // space is set further below
5114                               &_markBitMap, &_markStack, &mrias_cl);
5115   {
5116     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5117     // Iterate over the dirty cards, setting the corresponding bits in the
5118     // mod union table.
5119     {
5120       ModUnionClosure modUnionClosure(&_modUnionTable);
5121       _ct->ct_bs()->dirty_card_iterate(
5122                       _cmsGen->used_region(),
5123                       &modUnionClosure);
5124     }
5125     // Having transferred these marks into the modUnionTable, we just need
5126     // to rescan the marked objects on the dirty cards in the modUnionTable.
5127     // The initial marking may have been done during an asynchronous
5128     // collection so there may be dirty bits in the mod-union table.
5129     const int alignment =
5130       CardTableModRefBS::card_size * BitsPerWord;
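         // With the default 512-byte card size and a 64-bit VM (BitsPerWord == 64)
         // this works out to 512 * 64 = 32768, i.e. the upper bound of the rescan
         // region below is rounded up so that whole mod-union-table words
         // (64 card bits each) are covered at a time.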
5131     {
5132       // ... First handle dirty cards in CMS gen
5133       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5134       MemRegion ur = _cmsGen->used_region();
5135       HeapWord* lb = ur.start();
5136       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5137       MemRegion cms_span(lb, ub);
5138       _modUnionTable.dirty_range_iterate_clear(cms_span,
5139                                                &markFromDirtyCardsClosure);
5140       verify_work_stacks_empty();
5141       if (PrintCMSStatistics != 0) {
5142         gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5143           markFromDirtyCardsClosure.num_dirty_cards());
5144       }
5145     }
5146   }
5147   if (VerifyDuringGC &&
5148       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5149     HandleMark hm;  // Discard invalid handles created during verification
5150     Universe::verify();
5151   }
5152   {
5153     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5154 
5155     verify_work_stacks_empty();
5156 
5157     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5158     StrongRootsScope srs(1);
5159 
5160     gch->gen_process_roots(&srs,
5161                            GenCollectedHeap::OldGen,
5162                            true,  // young gen as roots
5163                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5164                            should_unload_classes(),
5165                            &mrias_cl,
5166                            NULL,
5167                            NULL); // The dirty klasses will be handled below
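         // "young gen as roots" above: the used part of the young generation is
         // scanned for pointers into the CMS generation, since no remembered set
         // tracks young-to-old references. (This is also why a scavenge performed
         // just before remark can reduce the cost of this root rescan.)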
5168 
5169     assert(should_unload_classes()
5170            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5171            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5172   }
5173 
5174   {
5175     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5176 
5177     verify_work_stacks_empty();
5178 
5179     // Scan all class loader data objects that might have been introduced
5180     // during concurrent marking.
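         // These are the CLDs recorded while remember_new_clds(true) was in
         // effect (it is turned off just below); the concurrent marker may not
         // have visited them, so they are treated as strong roots here.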
5181     ResourceMark rm;
5182     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5183     for (int i = 0; i < array->length(); i++) {
5184       mrias_cl.do_cld_nv(array->at(i));
5185     }
5186 
5187     // We don't need to keep track of new CLDs anymore.
5188     ClassLoaderDataGraph::remember_new_clds(false);
5189 
5190     verify_work_stacks_empty();
5191   }
5192 
5193   {
5194     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5195 
5196     verify_work_stacks_empty();
5197 
5198     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5199     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5200 
5201     verify_work_stacks_empty();
5202   }
5203 
5204   // We might have added oops to ClassLoaderData::_handles during the
5205   // concurrent marking phase. These oops point to newly allocated objects
5206   // that are guaranteed to be kept alive, either by the direct allocation
5207   // code or by the young collector when it processes the roots. Hence,
5208   // we don't have to revisit the _handles block during the remark phase.
5209 
5210   verify_work_stacks_empty();
5211   // Restore evacuated mark words, if any, used for overflow list links
5212   if (!CMSOverflowEarlyRestoration) {
5213     restore_preserved_marks_if_any();
5214   }


5383   workers->run_task(&enq_task);
5384 }
5385 
5386 void CMSCollector::refProcessingWork() {
5387   ResourceMark rm;
5388   HandleMark   hm;
5389 
5390   ReferenceProcessor* rp = ref_processor();
5391   assert(rp->span().equals(_span), "Spans should be equal");
5392   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5393   // Process weak references.
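       // The 'false' argument below means soft references are treated according
       // to the current clearing policy rather than being cleared unconditionally.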
5394   rp->setup_policy(false);
5395   verify_work_stacks_empty();
5396 
5397   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5398                                           &_markStack, false /* !preclean */);
5399   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5400                                 _span, &_markBitMap, &_markStack,
5401                                 &cmsKeepAliveClosure, false /* !preclean */);
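       // cmsKeepAliveClosure marks a referent (pushing it on the mark stack /
       // work queues) to keep it alive; cmsDrainMarkingStackClosure then drains
       // the marking stack to complete the transitive closure over anything
       // marked while processing discovered references.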
5402   {
5403     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
5404 
5405     ReferenceProcessorStats stats;
5406     if (rp->processing_is_mt()) {
5407       // Set the degree of MT processing here.  If discovery was done MT, it
5408       // may have used a different number of threads, so a different number
5409       // of discovered lists may hold Reference objects.
5410       // That is OK as long as the Reference lists are balanced (see
5411       // balance_all_queues() and balance_queues()).
5412       GenCollectedHeap* gch = GenCollectedHeap::heap();
5413       uint active_workers = ParallelGCThreads;
5414       WorkGang* workers = gch->workers();
5415       if (workers != NULL) {
5416         active_workers = workers->active_workers();
5417         // The expectation is that active_workers will have already
5418         // been set to a reasonable value.  If it has not been set,
5419         // investigate.
5420         assert(active_workers > 0, "Should have been set during scavenge");
5421       }
5422       rp->set_active_mt_degree(active_workers);
5423       CMSRefProcTaskExecutor task_executor(*this);
5424       stats = rp->process_discovered_references(&_is_alive_closure,
5425                                         &cmsKeepAliveClosure,
5426                                         &cmsDrainMarkingStackClosure,
5427                                         &task_executor,
5428                                         _gc_timer_cm);

5429     } else {
5430       stats = rp->process_discovered_references(&_is_alive_closure,
5431                                         &cmsKeepAliveClosure,
5432                                         &cmsDrainMarkingStackClosure,
5433                                         NULL,
5434                                         _gc_timer_cm);

5435     }
5436     _gc_tracer_cm->report_gc_reference_stats(stats);
5437 
5438   }
5439 
5440   // This is the point where the entire marking should have completed.
5441   verify_work_stacks_empty();
5442 
5443   if (should_unload_classes()) {
5444     {
5445       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
5446 
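           // Marking is complete at this point, so liveness is determined solely
           // by _is_alive_closure (the mark bitmap). The order below matters:
           // classes are unloaded first, and the result is passed to
           // CodeCache::do_unloading so that nmethod cleaning can account for
           // unloaded classes; dead klass links are pruned last.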
5447       // Unload classes and purge the SystemDictionary.
5448       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5449 
5450       // Unload nmethods.
5451       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5452 
5453       // Prune dead klasses from subklass/sibling/implementor lists.
5454       Klass::clean_weak_klass_links(&_is_alive_closure);
5455     }
5456 
5457     {
5458       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
5459       // Clean up unreferenced symbols in symbol table.
5460       SymbolTable::unlink();
5461     }
5462 
5463     {
5464       GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
5465       // Delete entries for dead interned strings.
5466       StringTable::unlink(&_is_alive_closure);
5467     }
5468   }
5469 
5470 
5471   // Restore any preserved marks as a result of mark stack or
5472   // work queue overflow
5473   restore_preserved_marks_if_any();  // done single-threaded for now
5474 
5475   rp->set_enqueuing_is_done(true);
5476   if (rp->processing_is_mt()) {
5477     rp->balance_all_queues();
5478     CMSRefProcTaskExecutor task_executor(*this);
5479     rp->enqueue_discovered_references(&task_executor);
5480   } else {
5481     rp->enqueue_discovered_references(NULL);
5482   }
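       // Enqueuing hands the discovered references over to the java.lang.ref
       // pending list, from which the reference handler thread will later push
       // them onto their respective ReferenceQueues.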
5483   rp->verify_no_references_recorded();
5484   assert(!rp->discovery_enabled(), "should have been disabled");


5512   }
5513 }
5514 #endif
5515 
5516 void CMSCollector::sweep() {
5517   assert(_collectorState == Sweeping, "just checking");
5518   check_correct_thread_executing();
5519   verify_work_stacks_empty();
5520   verify_overflow_empty();
5521   increment_sweep_count();
5522   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5523 
5524   _inter_sweep_timer.stop();
5525   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
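       // The time since the previous sweep is folded into a padded average;
       // these inter/intra-sweep estimates are later fed to the free list
       // space's sweep census, which uses them to estimate per-size-class
       // demand when deciding whether to coalesce blocks during the sweep.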
5526 
5527   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5528   _intra_sweep_timer.reset();
5529   _intra_sweep_timer.start();
5530   {
5531     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5532     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5533     // First sweep the old gen
5534     {
5535       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5536                                bitMapLock());
5537       sweepWork(_cmsGen);
5538     }
5539 
5540     // Update Universe::_heap_*_at_gc figures.
5541     // We need all the free list locks to make the abstract state
5542     // transition from Sweeping to Resizing. See detailed note
5543     // further below.
5544     {
5545       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5546       // Update heap occupancy information which is used as
5547       // input to soft ref clearing policy at the next gc.
5548       Universe::update_heap_info_at_gc();
5549       _collectorState = Resizing;
5550     }
5551   }
5552   verify_work_stacks_empty();


5697   }
5698 }
5699 
5700 // Reset CMS data structures (for now just the marking bit map)
5701 // preparatory for the next cycle.
5702 void CMSCollector::reset(bool concurrent) {
5703   if (concurrent) {
5704     CMSTokenSyncWithLocks ts(true, bitMapLock());
5705 
5706     // If the state is not "Resetting", the foreground thread
5707     // has already done a collection, including the resetting.
5708     if (_collectorState != Resetting) {
5709       assert(_collectorState == Idling, "The state should only change"
5710         " because the foreground collector has finished the collection");
5711       return;
5712     }
5713 
5714     // Clear the mark bitmap (no grey objects to start with)
5715     // for the next cycle.
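         // The bitmap is cleared in CMSBitMapYieldQuantum-sized chunks so that
         // between chunks the CMS thread can give up the bitmap lock and the CMS
         // token (see the yield check below), allowing a young collection or the
         // foreground collector to proceed.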
5716     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5717     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
5718 
5719     HeapWord* curAddr = _markBitMap.startWord();
5720     while (curAddr < _markBitMap.endWord()) {
5721       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5722       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5723       _markBitMap.clear_large_range(chunk);
5724       if (ConcurrentMarkSweepThread::should_yield() &&
5725           !foregroundGCIsActive() &&
5726           CMSYield) {
5727         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5728                "CMS thread should hold CMS token");
5729         assert_lock_strong(bitMapLock());
5730         bitMapLock()->unlock();
5731         ConcurrentMarkSweepThread::desynchronize(true);
5732         stopTimer();
5733         if (PrintCMSStatistics != 0) {
5734           incrementYields();
5735         }
5736 
5737         // See the comment in coordinator_yield()


5749     }
5750     // A successful mostly concurrent collection has been done.
5751     // Because only the full (i.e., concurrent mode failure) collections
5752     // are being measured for gc overhead limits, clear the "near" flag
5753     // and count.
5754     size_policy()->reset_gc_overhead_limit_count();
5755     _collectorState = Idling;
5756   } else {
5757     // already have the lock
5758     assert(_collectorState == Resetting, "just checking");
5759     assert_lock_strong(bitMapLock());
5760     _markBitMap.clear_all();
5761     _collectorState = Idling;
5762   }
5763 
5764   register_gc_end();
5765 }
5766 
5767 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5768   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5769   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
5770   TraceCollectorStats tcs(counters());
5771 
5772   switch (op) {
5773     case CMS_op_checkpointRootsInitial: {
5774       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5775       checkpointRootsInitial();
5776       if (PrintGC) {
5777         _cmsGen->printOccupancy("initial-mark");
5778       }
5779       break;
5780     }
5781     case CMS_op_checkpointRootsFinal: {
5782       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5783       checkpointRootsFinal();
5784       if (PrintGC) {
5785         _cmsGen->printOccupancy("remark");
5786       }
5787       break;
5788     }
5789     default:

