// after obtaining the free list locks for the
// two generations.
void CMSCollector::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);
  FreelistLocker z(this);
  MetaspaceGC::compute_new_size();
  _cmsGen->compute_new_size_free_list();
}

// A work method used by the foreground collector to do
// a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  // Temporarily, clear the "is_alive_non_header" field of the
  // reference processor.
  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
  // Temporarily make reference _processing_ single threaded (non-MT).
  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
  // Temporarily make refs discovery atomic.
  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
  // Temporarily make reference _discovery_ single threaded (non-MT).
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
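  // Note: each rp_mut_* object above is a scoped RAII mutator: its
  // constructor saves the ReferenceProcessor's current setting and its
  // destructor restores it, so every change made here is automatically
  // undone when do_compaction_work() returns.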

  ref_processor()->set_enqueuing_is_done(false);
  ref_processor()->enable_discovery();
  ref_processor()->setup_policy(clear_all_soft_refs);
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear. If we are assuming the collection from an asynchronous
// ... (code elided) ...

void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(Heap_lock);
}

void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  if (PrintGCDetails && Verbose) {
    warning("Shrinking of CMS not yet implemented");
  }
  return;
}


// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
// phases.
class CMSPhaseAccounting: public StackObj {
 public:
  CMSPhaseAccounting(CMSCollector *collector,
                     const char *phase,
                     const GCId gc_id,
                     bool print_cr = true);
  ~CMSPhaseAccounting();

 private:
  CMSCollector *_collector;
  const char *_phase;
  elapsedTimer _wallclock;
  bool _print_cr;
  const GCId _gc_id;

 public:
  // Not MT-safe; so do not pass around these StackObj's
  // where they may be accessed by other threads.
  jlong wallclock_millis() {
    assert(_wallclock.is_active(), "Wall clock should not stop");
    _wallclock.stop();  // to record time
    jlong ret = _wallclock.milliseconds();
    _wallclock.start(); // restart
    return ret;
  }
};

CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                       const char *phase,
                                       const GCId gc_id,
                                       bool print_cr) :
  _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {

  if (PrintCMSStatistics != 0) {
    _collector->resetYields();
  }
  if (PrintGCDetails) {
    gclog_or_tty->gclog_stamp(_gc_id);
    gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
      _collector->cmsGen()->short_name(), _phase);
  }
  _collector->resetTimer();
  _wallclock.start();
  _collector->startTimer();
}

CMSPhaseAccounting::~CMSPhaseAccounting() {
  assert(_wallclock.is_active(), "Wall clock should not have stopped");
  _collector->stopTimer();
  _wallclock.stop();
  if (PrintGCDetails) {
    gclog_or_tty->gclog_stamp(_gc_id);
    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
                        _collector->cmsGen()->short_name(),
                        _phase, _collector->timerValue(), _wallclock.seconds());
    if (_print_cr) {
      gclog_or_tty->cr();
    }
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
                             _collector->yields());
    }
  }
}
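// Illustrative only (the timings below are made up): with
// -XX:+PrintGCDetails the two print sites above bracket a concurrent
// phase roughly like
//   [CMS-concurrent-mark-start]
//   [CMS-concurrent-mark: 0.128/0.342 secs]
// where the first number is the accumulated CMS timer value and the
// second is the wall-clock duration of the phase.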

// CMS work

// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
class CMSParMarkTask : public AbstractGangTask {
 protected:
  CMSCollector* _collector;
  uint          _n_workers;
// ... (code elided) ...

    checkpointRootsInitialWork();
    // enable ("weak") refs discovery
    rp->enable_discovery();
    _collectorState = Marking;
  }
}

void CMSCollector::checkpointRootsInitialWork() {
  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  assert(_collectorState == InitialMarking, "just checking");

  // Already have locks.
  assert_lock_strong(bitMapLock());
  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");

  // Set up the verification and class unloading state for this
  // CMS collection cycle.
  setup_cms_unloading_and_verification_state();

  NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)

  // Reset all the PLAB chunk arrays if necessary.
  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
    reset_survivor_plab_arrays();
  }

  ResourceMark rm;
  HandleMark hm;

  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  verify_work_stacks_empty();
  verify_overflow_empty();

  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  // Update the saved marks which may affect the root scans.
  gch->save_marks();

  // weak reference processing has not started yet.
// ... (code elided) ...

  verify_overflow_empty();
}

bool CMSCollector::markFromRoots() {
  // we might be tempted to assert that:
  // assert(!SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a young generation
  // stop-the-world GC happens even as we mark in this generation.
  assert(_collectorState == Marking, "inconsistent state?");
  check_correct_thread_executing();
  verify_overflow_empty();

  // Weak ref discovery note: We may be discovering weak
  // refs in this generation concurrent (but interleaved) with
  // weak ref discovery by the young generation collector.

  CMSTokenSyncWithLocks ts(true, bitMapLock());
  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
  bool res = markFromRootsWork();
  if (res) {
    _collectorState = Precleaning;
  } else {  // We failed and a foreground collection wants to take over
    assert(_foregroundGCIsActive, "internal state inconsistency");
    assert(_restart_addr == NULL, "foreground will restart from scratch");
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("bailing out to foreground collection");
    }
  }
  verify_overflow_empty();
  return res;
}

bool CMSCollector::markFromRootsWork() {
  // iterate over marked bits in bit map, doing a full scan and mark
  // from these roots using the following algorithm:
  // . if oop is to the right of the current scan pointer,
  //   mark corresponding bit (we'll process it later)
  // . else (oop is to left of current scan pointer)
// ... (code elided) ...

  check_correct_thread_executing();
  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
  verify_work_stacks_empty();
  verify_overflow_empty();
  _abort_preclean = false;
  if (CMSPrecleaningEnabled) {
    if (!CMSEdenChunksRecordAlways) {
      _eden_chunk_index = 0;
    }
    size_t used = get_eden_used();
    size_t capacity = get_eden_capacity();
    // Don't start sampling unless we will get sufficiently
    // many samples.
    if (used < (capacity / (CMSScheduleRemarkSamplingRatio * 100)
                         * CMSScheduleRemarkEdenPenetration)) {
      _start_sampling = true;
    } else {
      _start_sampling = false;
    }
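    // Worked example (assuming the usual flag defaults of
    // CMSScheduleRemarkSamplingRatio = 5 and
    // CMSScheduleRemarkEdenPenetration = 50): the threshold above is
    // capacity / (5 * 100) * 50 = 10% of eden capacity, i.e. sampling
    // starts only if eden is currently less than 10% full.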
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
  }
  CMSTokenSync x(true);  // is cms thread
  if (CMSPrecleaningEnabled) {
    sample_eden();
    _collectorState = AbortablePreclean;
  } else {
    _collectorState = FinalMarking;
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
}

// Try and schedule the remark such that young gen
// occupancy is CMSScheduleRemarkEdenPenetration %.
void CMSCollector::abortable_preclean() {
  check_correct_thread_executing();
  assert(CMSPrecleaningEnabled, "Inconsistent control state");
  assert(_collectorState == AbortablePreclean, "Inconsistent control state");

  // If Eden's current occupancy is below this threshold,
  // immediately schedule the remark; else preclean
  // past the next scavenge in an effort to
  // schedule the pause as described above. By choosing
  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
  // we will never do an actual abortable preclean cycle.
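  // (Illustrative, assuming the common default of
  // CMSScheduleRemarkEdenSizeThreshold = 2m: if eden occupancy is at or
  // below 2MB, the remark is scheduled immediately and no abortable
  // preclean cycle runs.)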
  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
    // We need more smarts in the abortable preclean
    // loop below to deal with cases where allocation
    // in young gen is very very slow, and our precleaning
    // is running a losing race against a horde of
    // mutators intent on flooding us with CMS updates
    // (dirty cards).
    // One, admittedly dumb, strategy is to give up
    // after a certain number of abortable precleaning loops
    // or after a certain maximum time. We want to make
    // this smarter in the next iteration.
    // XXX FIX ME!!! YSR
    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
    while (!(should_abort_preclean() ||
             ConcurrentMarkSweepThread::should_terminate())) {
      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
      cumworkdone += workdone;
      loops++;
      // Voluntarily terminate abortable preclean phase if we have
      // been at it for too long.
      if ((CMSMaxAbortablePrecleanLoops != 0) &&
// ... (code elided) ...

    // Note that we don't need to protect ourselves from
    // interference with mutators because they can't
    // manipulate the discovered reference lists nor affect
    // the computed reachability of the referents, the
    // only properties manipulated by the precleaning
    // of these reference lists.
    stopTimer();
    CMSTokenSyncWithLocks x(true /* is cms thread */,
                            bitMapLock());
    startTimer();
    sample_eden();

    // The following will yield to allow foreground
    // collection to proceed promptly. XXX YSR:
    // The code in this method may need further
    // tweaking for better performance and some restructuring
    // for cleaner interfaces.
    GCTimer *gc_timer = NULL;  // Currently not tracing concurrent phases
    rp->preclean_discovered_references(
          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
          gc_timer, _gc_tracer_cm->gc_id());
  }

  if (clean_survivor) {  // preclean the active survivor space(s)
    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
                              &_markBitMap, &_modUnionTable,
                              &_markStack, true /* precleaning phase */);
    stopTimer();
    CMSTokenSyncWithLocks ts(true /* is cms thread */,
                             bitMapLock());
    startTimer();
    unsigned int before_count =
      GenCollectedHeap::heap()->total_collections();
    SurvivorSpacePrecleanClosure
      sss_cl(this, _span, &_markBitMap, &_markStack,
             &pam_cl, before_count, CMSYield);
    _young_gen->from()->object_iterate_careful(&sss_cl);
    _young_gen->to()->object_iterate_careful(&sss_cl);
  }
  MarkRefsIntoAndScanClosure
    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
// ... (code elided) ...

  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());

  verify_work_stacks_empty();
  verify_overflow_empty();

  if (PrintGCDetails) {
    gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
                        _young_gen->used() / K,
                        _young_gen->capacity() / K);
  }
  {
    if (CMSScavengeBeforeRemark) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set the flag to false; GCH->do_collection expects
      // it to be false and will set it to true.
      FlagSetting fl(gch->_is_gc_active, false);
      NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
        PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
      gch->do_collection(true,                      // full (i.e. force, see below)
                         false,                     // !clear_all_soft_refs
                         0,                         // size
                         false,                     // is_tlab
                         GenCollectedHeap::YoungGen // type
        );
    }
    FreelistLocker x(this);
    MutexLockerEx y(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    checkpointRootsFinalWork();
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
}

void CMSCollector::checkpointRootsFinalWork() {
  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)

  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  ResourceMark rm;
  HandleMark hm;

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  if (should_unload_classes()) {
    CodeCache::gc_prologue();
  }
  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  // We might assume that we need not fill TLAB's when
  // CMSScavengeBeforeRemark is set, because we may have just done
  // a scavenge which would have filled all TLAB's -- and besides
  // Eden would be empty. This however may not always be the case --
  // for instance although we asked for a scavenge, it may not have
// ... (code elided) ...

  gch->save_marks();

  if (CMSPrintEdenSurvivorChunks) {
    print_eden_and_survivor_chunk_arrays();
  }

  {
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)

    // Note on the role of the mod union table:
    // Since the marker in "markFromRoots" marks concurrently with
    // mutators, it is possible for some reachable objects not to have been
    // scanned. For instance, an only reference to an object A was
    // placed in object B after the marker scanned B. Unless B is rescanned,
    // A would be collected. Such updates to references in marked objects
    // are detected via the mod union table which is the set of all cards
    // dirtied since the first checkpoint in this GC cycle and prior to
    // the most recent young generation GC, minus those cleaned up by the
    // concurrent precleaning.
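    // Schematically (an informal summary of the comment above, not code):
    //   mod_union = (cards dirtied since the initial checkpoint,
    //                up to the most recent young GC)
    //               minus (cards already rescanned by precleaning)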
    if (CMSParallelRemarkEnabled) {
      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
      do_remark_parallel();
    } else {
      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
        _gc_timer_cm, _gc_tracer_cm->gc_id());
      do_remark_non_parallel();
    }
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  {
    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
    refProcessingWork();
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  if (should_unload_classes()) {
    CodeCache::gc_epilogue();
  }
  JvmtiExport::gc_epilogue();

  // If we encountered any (marking stack / work queue) overflow
  // events during the current CMS cycle, take appropriate
  // remedial measures, where possible, so as to try and avoid
  // recurrence of that condition.
  assert(_markStack.isEmpty(), "No grey objects");
  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
                     _ser_kac_ovflw + _ser_kac_preclean_ovflw;
  if (ser_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Marking stack overflow (benign) "
// ... (code elided) ...

  // as a result of work_q overflow
  restore_preserved_marks_if_any();
}

// Non-parallel version of remark
void CMSCollector::do_remark_non_parallel() {
  ResourceMark rm;
  HandleMark hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);

  MarkRefsIntoAndScanClosure
    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
             &_markStack, this,
             false /* should_yield */, false /* not precleaning */);
  MarkFromDirtyCardsClosure
    markFromDirtyCardsClosure(this, _span,
                              NULL,  // space is set further below
                              &_markBitMap, &_markStack, &mrias_cl);
  {
    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
    // Iterate over the dirty cards, setting the corresponding bits in the
    // mod union table.
    {
      ModUnionClosure modUnionClosure(&_modUnionTable);
      _ct->ct_bs()->dirty_card_iterate(
                      _cmsGen->used_region(),
                      &modUnionClosure);
    }
    // Having transferred these marks into the modUnionTable, we just need
    // to rescan the marked objects on the dirty cards in the modUnionTable.
    // The initial marking may have been done during an asynchronous
    // collection so there may be dirty bits in the mod-union table.
    const int alignment =
      CardTableModRefBS::card_size * BitsPerWord;
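    // With typical values (card_size = 512 bytes, BitsPerWord = 64 on
    // LP64 -- an illustration, not a guarantee), alignment is 32K: one
    // mod-union bitmap word covers BitsPerWord cards, so rounding the
    // range end up to this boundary lets the table be processed a whole
    // bitmap word at a time.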
    {
      // ... First handle dirty cards in CMS gen
      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
      MemRegion ur = _cmsGen->used_region();
      HeapWord* lb = ur.start();
      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
      MemRegion cms_span(lb, ub);
      _modUnionTable.dirty_range_iterate_clear(cms_span,
                                               &markFromDirtyCardsClosure);
      verify_work_stacks_empty();
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
                            markFromDirtyCardsClosure.num_dirty_cards());
      }
    }
  }
  if (VerifyDuringGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify();
  }
  {
    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

    verify_work_stacks_empty();

    gch->rem_set()->prepare_for_younger_refs_iterate(false);  // Not parallel.
    StrongRootsScope srs(1);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::OldGen,
                           true,  // young gen as roots
                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
                           should_unload_classes(),
                           &mrias_cl,
                           NULL,
                           NULL);  // The dirty klasses will be handled below

    assert(should_unload_classes()
           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
  }

  {
    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

    verify_work_stacks_empty();

    // Scan all class loader data objects that might have been introduced
    // during concurrent marking.
    ResourceMark rm;
    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
    for (int i = 0; i < array->length(); i++) {
      mrias_cl.do_cld_nv(array->at(i));
    }

    // We don't need to keep track of new CLDs anymore.
    ClassLoaderDataGraph::remember_new_clds(false);

    verify_work_stacks_empty();
  }

  {
    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

    verify_work_stacks_empty();

    RemarkKlassClosure remark_klass_closure(&mrias_cl);
    ClassLoaderDataGraph::classes_do(&remark_klass_closure);

    verify_work_stacks_empty();
  }

  // We might have added oops to ClassLoaderData::_handles during the
  // concurrent marking phase. These oops point to newly allocated objects
  // that are guaranteed to be kept alive. Either by the direct allocation
  // code, or when the young collector processes the roots. Hence,
  // we don't have to revisit the _handles block during the remark phase.

  verify_work_stacks_empty();
  // Restore evacuated mark words, if any, used for overflow list links
  if (!CMSOverflowEarlyRestoration) {
    restore_preserved_marks_if_any();
  }
// ... (code elided) ...

  workers->run_task(&enq_task);
}

void CMSCollector::refProcessingWork() {
  ResourceMark rm;
  HandleMark hm;

  ReferenceProcessor* rp = ref_processor();
  assert(rp->span().equals(_span), "Spans should be equal");
  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  // Process weak references.
  rp->setup_policy(false);
  verify_work_stacks_empty();

  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                          &_markStack, false /* !preclean */);
  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                _span, &_markBitMap, &_markStack,
                                &cmsKeepAliveClosure, false /* !preclean */);
  {
    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

    ReferenceProcessorStats stats;
    if (rp->processing_is_mt()) {
      // Set the degree of MT here. If the discovery is done MT, there
      // may have been a different number of threads doing the discovery
      // and a different number of discovered lists may have Ref objects.
      // That is OK as long as the Reference lists are balanced (see
      // balance_all_queues() and balance_queues()).
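      // (In other words: discovery may have run with N threads while
      // processing runs with M; balance_all_queues() redistributes the
      // discovered lists so the M processing threads see roughly equal
      // amounts of work.)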
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      uint active_workers = ParallelGCThreads;
      WorkGang* workers = gch->workers();
      if (workers != NULL) {
        active_workers = workers->active_workers();
        // The expectation is that active_workers will have already
        // been set to a reasonable value. If it has not been set,
        // investigate.
        assert(active_workers > 0, "Should have been set during scavenge");
      }
      rp->set_active_mt_degree(active_workers);
      CMSRefProcTaskExecutor task_executor(*this);
      stats = rp->process_discovered_references(&_is_alive_closure,
                                                &cmsKeepAliveClosure,
                                                &cmsDrainMarkingStackClosure,
                                                &task_executor,
                                                _gc_timer_cm,
                                                _gc_tracer_cm->gc_id());
    } else {
      stats = rp->process_discovered_references(&_is_alive_closure,
                                                &cmsKeepAliveClosure,
                                                &cmsDrainMarkingStackClosure,
                                                NULL,
                                                _gc_timer_cm,
                                                _gc_tracer_cm->gc_id());
    }
    _gc_tracer_cm->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  verify_work_stacks_empty();

  if (should_unload_classes()) {
    {
      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

      // Unload classes and purge the SystemDictionary.
      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

      // Unload nmethods.
      CodeCache::do_unloading(&_is_alive_closure, purged_class);

      // Prune dead klasses from subklass/sibling/implementor lists.
      Klass::clean_weak_klass_links(&_is_alive_closure);
    }

    {
      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
      // Clean up unreferenced symbols in symbol table.
      SymbolTable::unlink();
    }

    {
      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
      // Delete entries for dead interned strings.
      StringTable::unlink(&_is_alive_closure);
    }
  }

  // Restore any preserved marks as a result of mark stack or
  // work queue overflow
  restore_preserved_marks_if_any();  // done single-threaded for now

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    rp->balance_all_queues();
    CMSRefProcTaskExecutor task_executor(*this);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "should have been disabled");
// ... (code elided) ...

  }
}
#endif

void CMSCollector::sweep() {
  assert(_collectorState == Sweeping, "just checking");
  check_correct_thread_executing();
  verify_work_stacks_empty();
  verify_overflow_empty();
  increment_sweep_count();
  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());

  _inter_sweep_timer.stop();
  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
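  // (Background, as a hedged aside: the sampled inter-sweep duration
  // feeds a padded running average that the free list space consults,
  // e.g. when estimating per-size-class demand between sweeps; see the
  // sweep census machinery for the exact use.)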

  assert(!_intra_sweep_timer.is_active(), "Should not be active");
  _intra_sweep_timer.reset();
  _intra_sweep_timer.start();
  {
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
    // First sweep the old gen
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                               bitMapLock());
      sweepWork(_cmsGen);
    }

    // Update Universe::_heap_*_at_gc figures.
    // We need all the free list locks to make the abstract state
    // transition from Sweeping to Resetting. See detailed note
    // further below.
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
      // Update heap occupancy information which is used as
      // input to soft ref clearing policy at the next gc.
      Universe::update_heap_info_at_gc();
      _collectorState = Resizing;
    }
  }
  verify_work_stacks_empty();
// ... (code elided) ...

  }
}

// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
void CMSCollector::reset(bool concurrent) {
  if (concurrent) {
    CMSTokenSyncWithLocks ts(true, bitMapLock());

    // If the state is not "Resetting", the foreground thread
    // has done a collection and the resetting.
    if (_collectorState != Resetting) {
      assert(_collectorState == Idling, "The state should only change"
             " because the foreground collector has finished the collection");
      return;
    }

    // Clear the mark bitmap (no grey objects to start with)
    // for the next cycle.
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);

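    // The loop below clears the bitmap in chunks of at most
    // CMSBitMapYieldQuantum heap words at a time so that, between
    // chunks, the CMS thread can give up the bitmap lock and CMS token
    // to a waiting foreground or young collection.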
    HeapWord* curAddr = _markBitMap.startWord();
    while (curAddr < _markBitMap.endWord()) {
      size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
      _markBitMap.clear_large_range(chunk);
      if (ConcurrentMarkSweepThread::should_yield() &&
          !foregroundGCIsActive() &&
          CMSYield) {
        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
               "CMS thread should hold CMS token");
        assert_lock_strong(bitMapLock());
        bitMapLock()->unlock();
        ConcurrentMarkSweepThread::desynchronize(true);
        stopTimer();
        if (PrintCMSStatistics != 0) {
          incrementYields();
        }

        // See the comment in coordinator_yield()
// ... (code elided) ...

    }
    // A successful mostly concurrent collection has been done.
    // Because only the full (i.e., concurrent mode failure) collections
    // are being measured for gc overhead limits, clean the "near" flag
    // and count.
    size_policy()->reset_gc_overhead_limit_count();
    _collectorState = Idling;
  } else {
    // already have the lock
    assert(_collectorState == Resetting, "just checking");
    assert_lock_strong(bitMapLock());
    _markBitMap.clear_all();
    _collectorState = Idling;
  }

  register_gc_end();
}

void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
  TraceCollectorStats tcs(counters());

  switch (op) {
    case CMS_op_checkpointRootsInitial: {
      SvcGCMarker sgcm(SvcGCMarker::OTHER);
      checkpointRootsInitial();
      if (PrintGC) {
        _cmsGen->printOccupancy("initial-mark");
      }
      break;
    }
    case CMS_op_checkpointRootsFinal: {
      SvcGCMarker sgcm(SvcGCMarker::OTHER);
      checkpointRootsFinal();
      if (PrintGC) {
        _cmsGen->printOccupancy("remark");
      }
      break;
    }
    default:
// ... (remainder elided) ...