2980 } else if (CMSRemarkVerifyVariant == 2) {
2981 // In this second variant of verification, we flag an error
2982 // (i.e. an object reachable in the new marks-vector not reachable
2983 // in the CMS marks-vector) immediately, also indicating the
2984 // identity of an object (A) that references the unmarked object (B) --
2985 // presumably, a mutation to A failed to be picked up by preclean/remark?
2986 verify_after_remark_work_2();
2987 } else {
2988 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2989 CMSRemarkVerifyVariant);
2990 }
2991 if (!silent) gclog_or_tty->print(" done] ");
2992 return true;
2993 }
2994
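// Sketch of the intent (inferred from the code that follows): variant 1 of the
// after-remark verification re-marks everything reachable from the roots into a
// fresh verification bitmap, so the result can be compared against the marks
// produced by the CMS preclean/remark phases.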
2995 void CMSCollector::verify_after_remark_work_1() {
2996 ResourceMark rm;
2997 HandleMark hm;
2998 GenCollectedHeap* gch = GenCollectedHeap::heap();
2999
3000 // Get a clear set of claim bits for the strong roots processing to work with.
3001 ClassLoaderDataGraph::clear_claimed_marks();
3002
3003 // Mark from roots one level into CMS
3004 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3005 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3006
3007 gch->gen_process_strong_roots(_cmsGen->level(),
3008 true, // younger gens are roots
3009 true, // activate StrongRootsScope
3010 SharedHeap::ScanningOption(roots_scanning_options()),
3011 ¬Older,
3012 NULL,
3013 NULL); // SSS: Provide correct closure
3014
3015 // Now mark from the roots
3016 MarkFromRootsClosure markFromRootsClosure(this, _span,
3017 verification_mark_bm(), verification_mark_stack(),
3018 false /* don't yield */, true /* verifying */);
3019 assert(_restart_addr == NULL, "Expected pre-condition");
3020 verification_mark_bm()->iterate(&markFromRootsClosure);
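  // If marking ran out of stack space, the closure is assumed to have recorded
  // a resume point in _restart_addr (an inference from the restart protocol
  // below); keep restarting from that address until an iteration completes
  // without overflowing.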
3021 while (_restart_addr != NULL) {
3022 // Deal with stack overflow by restarting at the indicated
3023 // address.
3024 HeapWord* ra = _restart_addr;
3025 markFromRootsClosure.reset(ra);
3026 _restart_addr = NULL;
3027 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3028 }
3029 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3030 verify_work_stacks_empty();
3044 class VerifyKlassOopsKlassClosure : public KlassClosure {
3045 class VerifyKlassOopsClosure : public OopClosure {
3046 CMSBitMap* _bitmap;
3047 public:
3048 VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3049 void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3050 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3051 } _oop_closure;
3052 public:
3053 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3054 void do_klass(Klass* k) {
3055 k->oops_do(&_oop_closure);
3056 }
3057 };
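// Used below in verify_after_remark_work_2() via ClassLoaderDataGraph::classes_do()
// to check that every oop embedded in each Klass in the ClassLoaderDataGraph is
// marked in the verification bitmap.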
3058
3059 void CMSCollector::verify_after_remark_work_2() {
3060 ResourceMark rm;
3061 HandleMark hm;
3062 GenCollectedHeap* gch = GenCollectedHeap::heap();
3063
3064 // Get a clear set of claim bits for the strong roots processing to work with.
3065 ClassLoaderDataGraph::clear_claimed_marks();
3066
3067 // Mark from roots one level into CMS
3068 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3069 markBitMap());
3070 KlassToOopClosure klass_closure(¬Older);
3071
3072 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3073 gch->gen_process_strong_roots(_cmsGen->level(),
3074 true, // younger gens are roots
3075 true, // activate StrongRootsScope
3076 SharedHeap::ScanningOption(roots_scanning_options()),
3077 ¬Older,
3078 NULL,
3079 &klass_closure);
3080
3081 // Now mark from the roots
3082 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3083 verification_mark_bm(), markBitMap(), verification_mark_stack());
3084 assert(_restart_addr == NULL, "Expected pre-condition");
3085 verification_mark_bm()->iterate(&markFromRootsClosure);
3086 while (_restart_addr != NULL) {
3087 // Deal with stack overflow by restarting at the indicated
3088 // address.
3089 HeapWord* ra = _restart_addr;
3090 markFromRootsClosure.reset(ra);
3091 _restart_addr = NULL;
3092 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3093 }
3094 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3095 verify_work_stacks_empty();
3096
3097 VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3098 ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3099
3246 // Condition 1 above
3247 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3248 _should_unload_classes = true;
3249 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3250 // Disjuncts 2.b.(i,ii,iii) above
3251 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3252 CMSClassUnloadingMaxInterval)
3253 || _cmsGen->is_too_full();
3254 }
3255 }
3256
3257 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3258 bool res = should_concurrent_collect();
3259 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
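  // For example, assuming CMSIsTooFullPercentage defaults to 98, the generation
  // is considered too full only when a concurrent collection is already
  // warranted and occupancy exceeds 98% of capacity.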
3260 return res;
3261 }
3262
3263 void CMSCollector::setup_cms_unloading_and_verification_state() {
3264 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3265 || VerifyBeforeExit;
3266 const int rso = SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
3267
3268 // We set the proper root for this CMS cycle here.
3269 if (should_unload_classes()) { // Should unload classes this cycle
3270 remove_root_scanning_option(SharedHeap::SO_AllClasses);
3271 add_root_scanning_option(SharedHeap::SO_SystemClasses);
3272 remove_root_scanning_option(rso); // Shrink the root set appropriately
3273 set_verifying(should_verify); // Set verification state for this cycle
3274 return; // Nothing else needs to be done at this time
3275 }
3276
3277 // Not unloading classes this cycle
3278 assert(!should_unload_classes(), "Inconsistency!");
3279 remove_root_scanning_option(SharedHeap::SO_SystemClasses);
3280 add_root_scanning_option(SharedHeap::SO_AllClasses);
3281
3282 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3283 // Include symbols, strings and code cache elements to prevent their resurrection.
3284 add_root_scanning_option(rso);
3285 set_verifying(true);
3286 } else if (verifying() && !should_verify) {
3287 // We were verifying, but some verification flags got disabled.
3288 set_verifying(false);
3289 // Exclude symbols, strings and code cache elements from root scanning to
3290 // reduce IM and RM pauses.
3291 remove_root_scanning_option(rso);
3292 }
3293 }
3294
3295
3296 #ifndef PRODUCT
3297 HeapWord* CMSCollector::block_start(const void* p) const {
3298 const HeapWord* addr = (HeapWord*)p;
3299 if (_span.contains(p)) {
3300 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3668 {
3669 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3670 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3671 // The parallel version.
3672 FlexibleWorkGang* workers = gch->workers();
3673 assert(workers != NULL, "Need parallel worker threads.");
3674 int n_workers = workers->active_workers();
3675 CMSParInitialMarkTask tsk(this, n_workers);
3676 gch->set_par_threads(n_workers);
3677 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3678 if (n_workers > 1) {
3679 GenCollectedHeap::StrongRootsScope srs(gch);
3680 workers->run_task(&tsk);
3681 } else {
3682 GenCollectedHeap::StrongRootsScope srs(gch);
3683 tsk.work(0);
3684 }
3685 gch->set_par_threads(0);
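      // Reset the parallel thread count now that the parallel root scan is
      // done, so later serial phases are not treated as parallel.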
3686 } else {
3687 // The serial version.
3688 KlassToOopClosure klass_closure(¬Older);
3689 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3690 gch->gen_process_strong_roots(_cmsGen->level(),
3691 true, // younger gens are roots
3692 true, // activate StrongRootsScope
3693 SharedHeap::ScanningOption(roots_scanning_options()),
3694 ¬Older,
3695 NULL,
3696 &klass_closure);
3697 }
3698 }
3699
3700 // Clear mod-union table; it will be dirtied in the CMS generation's
3701 // prologue at each younger generation collection.
3702
3703 assert(_modUnionTable.isAllClear(),
3704 "Was cleared in most recent final checkpoint phase"
3705 " or no bits are set in the gc_prologue before the start of the next "
3706 "subsequent marking phase.");
3707
3708 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3709
3710 // Save the end of the used_region of the constituent generations
3711 // to be used to limit the extent of sweep in each generation.
3712 save_sweep_limits();
3713 verify_overflow_empty();
3714 }
3715
3716 bool CMSCollector::markFromRoots(bool asynch) {
5122 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5123
5124 // Change under the freelistLocks.
5125 _collectorState = Sweeping;
5126 // Call isAllClear() under bitMapLock
5127 assert(_modUnionTable.isAllClear(),
5128 "Should be clear by end of the final marking");
5129 assert(_ct->klass_rem_set()->mod_union_is_clear(),
5130 "Should be clear by end of the final marking");
5131 }
5132
5133 void CMSParInitialMarkTask::work(uint worker_id) {
5134 elapsedTimer _timer;
5135 ResourceMark rm;
5136 HandleMark hm;
5137
5138 // ---------- scan from roots --------------
5139 _timer.start();
5140 GenCollectedHeap* gch = GenCollectedHeap::heap();
5141 Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5142 KlassToOopClosure klass_closure(&par_mri_cl);
5143
5144 // ---------- young gen roots --------------
5145 {
5146 work_on_young_gen_roots(worker_id, &par_mri_cl);
5147 _timer.stop();
5148 if (PrintCMSStatistics != 0) {
5149 gclog_or_tty->print_cr(
5150 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5151 worker_id, _timer.seconds());
5152 }
5153 }
5154
5155 // ---------- remaining roots --------------
5156 _timer.reset();
5157 _timer.start();
5158 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5159 false, // yg was scanned above
5160 false, // this is parallel code
5161 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5162 &par_mri_cl,
5163 NULL,
5164 &klass_closure);
5165 assert(_collector->should_unload_classes()
5166 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5167 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5168 _timer.stop();
5169 if (PrintCMSStatistics != 0) {
5170 gclog_or_tty->print_cr(
5171 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5172 worker_id, _timer.seconds());
5173 }
5174 }
5175
5176 // Parallel remark task
5177 class CMSParRemarkTask: public CMSParMarkTask {
5178 CompactibleFreeListSpace* _cms_space;
5179
5180 // The per-thread work queues, available here for stealing.
5181 OopTaskQueueSet* _task_queues;
5182 ParallelTaskTerminator _term;
5183
5184 public:
5273 work_queue(worker_id));
5274
5275 // Rescan young gen roots first since these are likely
5276 // coarsely partitioned and may, on that account, constitute
5277 // the critical path; thus, it's best to start off that
5278 // work first.
5279 // ---------- young gen roots --------------
5280 {
5281 work_on_young_gen_roots(worker_id, &par_mrias_cl);
5282 _timer.stop();
5283 if (PrintCMSStatistics != 0) {
5284 gclog_or_tty->print_cr(
5285 "Finished young gen rescan work in %dth thread: %3.3f sec",
5286 worker_id, _timer.seconds());
5287 }
5288 }
5289
5290 // ---------- remaining roots --------------
5291 _timer.reset();
5292 _timer.start();
5293 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5294 false, // yg was scanned above
5295 false, // this is parallel code
5296 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5297 &par_mrias_cl,
5298 NULL,
5299 NULL); // The dirty klasses will be handled below
5300 assert(_collector->should_unload_classes()
5301 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5302 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5303 _timer.stop();
5304 if (PrintCMSStatistics != 0) {
5305 gclog_or_tty->print_cr(
5306 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5307 worker_id, _timer.seconds());
5308 }
5309
5310 // ---------- unhandled CLD scanning ----------
5311 if (worker_id == 0) { // Single threaded at the moment.
5312 _timer.reset();
5313 _timer.start();
5314
5315 // Scan all new class loader data objects and new dependencies that were
5316 // introduced during concurrent marking.
5317 ResourceMark rm;
5318 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5319 for (int i = 0; i < array->length(); i++) {
5334 // ---------- dirty klass scanning ----------
5335 if (worker_id == 0) { // Single threaded at the moment.
5336 _timer.reset();
5337 _timer.start();
5338
5339 // Scan all classes that were dirtied during the concurrent marking phase.
5340 RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5341 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5342
5343 _timer.stop();
5344 if (PrintCMSStatistics != 0) {
5345 gclog_or_tty->print_cr(
5346 "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5347 worker_id, _timer.seconds());
5348 }
5349 }
5350
5351 // We might have added oops to ClassLoaderData::_handles during the
5352 // concurrent marking phase. These oops point to newly allocated objects
5353 // that are guaranteed to be kept alive, either by the direct allocation
5354 // code or when the young collector processes the strong roots. Hence,
5355 // we don't have to revisit the _handles block during the remark phase.
5356
5357 // ---------- rescan dirty cards ------------
5358 _timer.reset();
5359 _timer.start();
5360
5361 // Do the rescan tasks for each of the two spaces
5362 // (cms_space) in turn.
5363 // "worker_id" is passed to select the task_queue for "worker_id"
5364 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5365 _timer.stop();
5366 if (PrintCMSStatistics != 0) {
5367 gclog_or_tty->print_cr(
5368 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5369 worker_id, _timer.seconds());
5370 }
5371
5372 // ---------- steal work from other threads ...
5373 // ---------- ... and drain overflow list.
5374 _timer.reset();
5756 // Parallel version of remark
5757 void CMSCollector::do_remark_parallel() {
5758 GenCollectedHeap* gch = GenCollectedHeap::heap();
5759 FlexibleWorkGang* workers = gch->workers();
5760 assert(workers != NULL, "Need parallel worker threads.");
5761 // Choose to use the number of GC workers most recently set
5762 // into "active_workers". If active_workers is not set, set it
5763 // to ParallelGCThreads.
5764 int n_workers = workers->active_workers();
5765 if (n_workers == 0) {
5766 assert(n_workers > 0, "Should have been set during scavenge");
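    // The assert fires only in debug builds; in product builds we fall back
    // to ParallelGCThreads so the parallel remark can still proceed.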
5767 n_workers = ParallelGCThreads;
5768 workers->set_active_workers(n_workers);
5769 }
5770 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5771
5772 CMSParRemarkTask tsk(this,
5773 cms_space,
5774 n_workers, workers, task_queues());
5775
5776 // Set up for parallel process_strong_roots work.
5777 gch->set_par_threads(n_workers);
5778 // We won't be iterating over the cards in the card table updating
5779 // the younger_gen cards, so we shouldn't call the following; otherwise
5780 // the verification code, as well as subsequent younger_refs_iterate
5781 // code, would get confused. XXX
5782 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5783
5784 // The young gen rescan work will not be done as part of
5785 // process_strong_roots (which currently doesn't know how to
5786 // parallelize such a scan), but rather will be broken up into
5787 // a set of parallel tasks (via the sampling that the [abortable]
5788 // preclean phase did of EdenSpace, plus the [two] tasks of
5789 // scanning the [two] survivor spaces). Further fine-grain
5790 // parallelization of the scanning of the survivor spaces
5791 // themselves, and of precleaning of the younger gen itself
5792 // is deferred to the future.
5793 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5794
5795 // The dirty card rescan work is broken up into a "sequence"
5796 // of parallel tasks (per constituent space) that are dynamically
5797 // claimed by the parallel threads.
5798 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5799
5800 // It turns out that even when we're using 1 thread, doing the work in a
5801 // separate thread causes wide variance in run times. We can't help this
5802 // in the multi-threaded case, but we special-case n=1 here to get
5803 // repeatable measurements of the 1-thread overhead of the parallel code.
5804 if (n_workers > 1) {
5805 // Make refs discovery MT-safe, if it isn't already: it may not
5862 &markFromDirtyCardsClosure);
5863 verify_work_stacks_empty();
5864 if (PrintCMSStatistics != 0) {
5865 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5866 markFromDirtyCardsClosure.num_dirty_cards());
5867 }
5868 }
5869 }
5870 if (VerifyDuringGC &&
5871 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5872 HandleMark hm; // Discard invalid handles created during verification
5873 Universe::verify();
5874 }
5875 {
5876 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5877
5878 verify_work_stacks_empty();
5879
5880 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5881 GenCollectedHeap::StrongRootsScope srs(gch);
5882 gch->gen_process_strong_roots(_cmsGen->level(),
5883 true, // younger gens as roots
5884 false, // use the local StrongRootsScope
5885 SharedHeap::ScanningOption(roots_scanning_options()),
5886 &mrias_cl,
5887 NULL,
5888 NULL); // The dirty klasses will be handled below
5889
5890 assert(should_unload_classes()
5891 || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5892 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5893 }
5894
5895 {
5896 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5897
5898 verify_work_stacks_empty();
5899
5900 // Scan all class loader data objects that might have been introduced
5901 // during concurrent marking.
5902 ResourceMark rm;
5903 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5904 for (int i = 0; i < array->length(); i++) {
5905 mrias_cl.do_class_loader_data(array->at(i));
5908 // We don't need to keep track of new CLDs anymore.
5909 ClassLoaderDataGraph::remember_new_clds(false);
5910
5911 verify_work_stacks_empty();
5912 }
5913
5914 {
5915 GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5916
5917 verify_work_stacks_empty();
5918
5919 RemarkKlassClosure remark_klass_closure(&mrias_cl);
5920 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5921
5922 verify_work_stacks_empty();
5923 }
5924
5925 // We might have added oops to ClassLoaderData::_handles during the
5926 // concurrent marking phase. These oops point to newly allocated objects
5927 // that are guaranteed to be kept alive, either by the direct allocation
5928 // code or when the young collector processes the strong roots. Hence,
5929 // we don't have to revisit the _handles block during the remark phase.
5930
5931 verify_work_stacks_empty();
5932 // Restore evacuated mark words, if any, used for overflow list links
5933 if (!CMSOverflowEarlyRestoration) {
5934 restore_preserved_marks_if_any();
5935 }
5936 verify_overflow_empty();
5937 }
5938
5939 ////////////////////////////////////////////////////////
5940 // Parallel Reference Processing Task Proxy Class
5941 ////////////////////////////////////////////////////////
5942 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5943 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5944 CMSCollector* _collector;
5945 CMSBitMap* _mark_bit_map;
5946 const MemRegion _span;
5947 ProcessTask& _task;
5948
6158
6159 if (should_unload_classes()) {
6160 {
6161 GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6162
6163 // Unload classes and purge the SystemDictionary.
6164 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
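      // purged_class records whether any classes were actually unloaded;
      // CodeCache::do_unloading() below presumably uses it to decide how
      // aggressively nmethods must be scanned for stale references.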
6165
6166 // Unload nmethods.
6167 CodeCache::do_unloading(&_is_alive_closure, purged_class);
6168
6169 // Prune dead klasses from subklass/sibling/implementor lists.
6170 Klass::clean_weak_klass_links(&_is_alive_closure);
6171 }
6172
6173 {
6174 GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6175 // Clean up unreferenced symbols in symbol table.
6176 SymbolTable::unlink();
6177 }
6178 }
6179
6180 // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
6181 // Need to check if we really scanned the StringTable.
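// If SO_Strings was not among the scanned roots, the StringTable was treated
// only weakly this cycle, so prune entries for interned strings that are no
// longer reachable.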
6182 if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
6183 GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6184 // Delete entries for dead interned strings.
6185 StringTable::unlink(&_is_alive_closure);
6186 }
6187
6188 // Restore any preserved marks as a result of mark stack or
6189 // work queue overflow
6190 restore_preserved_marks_if_any(); // done single-threaded for now
6191
6192 rp->set_enqueuing_is_done(true);
6193 if (rp->processing_is_mt()) {
6194 rp->balance_all_queues();
6195 CMSRefProcTaskExecutor task_executor(*this);
6196 rp->enqueue_discovered_references(&task_executor);
6197 } else {
6198 rp->enqueue_discovered_references(NULL);
6199 }
6200 rp->verify_no_references_recorded();
6201 assert(!rp->discovery_enabled(), "should have been disabled");
6202 }
6203
6204 #ifndef PRODUCT
6205 void CMSCollector::check_correct_thread_executing() {
6206 Thread* t = Thread::current();
2980 } else if (CMSRemarkVerifyVariant == 2) {
2981 // In this second variant of verification, we flag an error
2982 // (i.e. an object reachable in the new marks-vector not reachable
2983 // in the CMS marks-vector) immediately, also indicating the
2984 // identity of an object (A) that references the unmarked object (B) --
2985 // presumably, a mutation to A failed to be picked up by preclean/remark?
2986 verify_after_remark_work_2();
2987 } else {
2988 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2989 CMSRemarkVerifyVariant);
2990 }
2991 if (!silent) gclog_or_tty->print(" done] ");
2992 return true;
2993 }
2994
2995 void CMSCollector::verify_after_remark_work_1() {
2996 ResourceMark rm;
2997 HandleMark hm;
2998 GenCollectedHeap* gch = GenCollectedHeap::heap();
2999
3000 // Get a clear set of claim bits for the roots processing to work with.
3001 ClassLoaderDataGraph::clear_claimed_marks();
3002
3003 // Mark from roots one level into CMS
3004 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3005 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3006
3007 gch->gen_process_roots(_cmsGen->level(),
3008 true, // younger gens are roots
3009 true, // activate StrongRootsScope
3010 SharedHeap::ScanningOption(roots_scanning_options()),
3011 should_unload_classes(),
3012 ¬Older,
3013 NULL,
3014 NULL); // SSS: Provide correct closure
3015
3016 // Now mark from the roots
3017 MarkFromRootsClosure markFromRootsClosure(this, _span,
3018 verification_mark_bm(), verification_mark_stack(),
3019 false /* don't yield */, true /* verifying */);
3020 assert(_restart_addr == NULL, "Expected pre-condition");
3021 verification_mark_bm()->iterate(&markFromRootsClosure);
3022 while (_restart_addr != NULL) {
3023 // Deal with stack overflow by restarting at the indicated
3024 // address.
3025 HeapWord* ra = _restart_addr;
3026 markFromRootsClosure.reset(ra);
3027 _restart_addr = NULL;
3028 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3029 }
3030 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3031 verify_work_stacks_empty();
3045 class VerifyKlassOopsKlassClosure : public KlassClosure {
3046 class VerifyKlassOopsClosure : public OopClosure {
3047 CMSBitMap* _bitmap;
3048 public:
3049 VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3050 void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3051 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3052 } _oop_closure;
3053 public:
3054 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3055 void do_klass(Klass* k) {
3056 k->oops_do(&_oop_closure);
3057 }
3058 };
3059
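// Sketch of the intent (inferred from the remark-verification comments above):
// variant 2 cross-checks against the CMS mark bitmap as it marks, flagging an
// unmarked object immediately together with the object that references it.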
3060 void CMSCollector::verify_after_remark_work_2() {
3061 ResourceMark rm;
3062 HandleMark hm;
3063 GenCollectedHeap* gch = GenCollectedHeap::heap();
3064
3065 // Get a clear set of claim bits for the roots processing to work with.
3066 ClassLoaderDataGraph::clear_claimed_marks();
3067
3068 // Mark from roots one level into CMS
3069 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3070 markBitMap());
3071 CLDToOopClosure cld_closure(¬Older, true);
3072
3073 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3074
3075 gch->gen_process_roots(_cmsGen->level(),
3076 true, // younger gens are roots
3077 true, // activate StrongRootsScope
3078 SharedHeap::ScanningOption(roots_scanning_options()),
3079 should_unload_classes(),
3080 ¬Older,
3081 NULL,
3082 &cld_closure);
3083
3084 // Now mark from the roots
3085 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3086 verification_mark_bm(), markBitMap(), verification_mark_stack());
3087 assert(_restart_addr == NULL, "Expected pre-condition");
3088 verification_mark_bm()->iterate(&markFromRootsClosure);
3089 while (_restart_addr != NULL) {
3090 // Deal with stack overflow by restarting at the indicated
3091 // address.
3092 HeapWord* ra = _restart_addr;
3093 markFromRootsClosure.reset(ra);
3094 _restart_addr = NULL;
3095 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3096 }
3097 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3098 verify_work_stacks_empty();
3099
3100 VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3101 ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3102
3249 // Condition 1 above
3250 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3251 _should_unload_classes = true;
3252 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3253 // Disjuncts 2.b.(i,ii,iii) above
3254 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3255 CMSClassUnloadingMaxInterval)
3256 || _cmsGen->is_too_full();
3257 }
3258 }
3259
3260 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3261 bool res = should_concurrent_collect();
3262 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3263 return res;
3264 }
3265
3266 void CMSCollector::setup_cms_unloading_and_verification_state() {
3267 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3268 || VerifyBeforeExit;
3269 const int rso = SharedHeap::SO_AllCodeCache;
3270
3271 // We set the proper root for this CMS cycle here.
3272 if (should_unload_classes()) { // Should unload classes this cycle
3273 remove_root_scanning_option(rso); // Shrink the root set appropriately
3274 set_verifying(should_verify); // Set verification state for this cycle
3275 return; // Nothing else needs to be done at this time
3276 }
3277
3278 // Not unloading classes this cycle
3279 assert(!should_unload_classes(), "Inconsistency!");
3280
3281 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3282 // Include symbols, strings and code cache elements to prevent their resurrection.
3283 add_root_scanning_option(rso);
3284 set_verifying(true);
3285 } else if (verifying() && !should_verify) {
3286 // We were verifying, but some verification flags got disabled.
3287 set_verifying(false);
3288 // Exclude symbols, strings and code cache elements from root scanning to
3289 // reduce IM and RM pauses.
3290 remove_root_scanning_option(rso);
3291 }
3292 }
3293
3294
3295 #ifndef PRODUCT
3296 HeapWord* CMSCollector::block_start(const void* p) const {
3297 const HeapWord* addr = (HeapWord*)p;
3298 if (_span.contains(p)) {
3299 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3667 {
3668 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3669 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3670 // The parallel version.
3671 FlexibleWorkGang* workers = gch->workers();
3672 assert(workers != NULL, "Need parallel worker threads.");
3673 int n_workers = workers->active_workers();
3674 CMSParInitialMarkTask tsk(this, n_workers);
3675 gch->set_par_threads(n_workers);
3676 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3677 if (n_workers > 1) {
3678 GenCollectedHeap::StrongRootsScope srs(gch);
3679 workers->run_task(&tsk);
3680 } else {
3681 GenCollectedHeap::StrongRootsScope srs(gch);
3682 tsk.work(0);
3683 }
3684 gch->set_par_threads(0);
3685 } else {
3686 // The serial version.
3687 CLDToOopClosure cld_closure(¬Older, true);
3688 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3689 gch->gen_process_roots(_cmsGen->level(),
3690 true, // younger gens are roots
3691 true, // activate StrongRootsScope
3692 SharedHeap::ScanningOption(roots_scanning_options()),
3693 should_unload_classes(),
3694 ¬Older,
3695 NULL,
3696 &cld_closure);
3697 }
3698 }
3699
3700 // Clear mod-union table; it will be dirtied in the CMS generation's
3701 // prologue at each younger generation collection.
3702
3703 assert(_modUnionTable.isAllClear(),
3704 "Was cleared in most recent final checkpoint phase"
3705 " or no bits are set in the gc_prologue before the start of the next "
3706 "subsequent marking phase.");
3707
3708 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3709
3710 // Save the end of the used_region of the constituent generations
3711 // to be used to limit the extent of sweep in each generation.
3712 save_sweep_limits();
3713 verify_overflow_empty();
3714 }
3715
3716 bool CMSCollector::markFromRoots(bool asynch) {
5122 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5123
5124 // Change under the freelistLocks.
5125 _collectorState = Sweeping;
5126 // Call isAllClear() under bitMapLock
5127 assert(_modUnionTable.isAllClear(),
5128 "Should be clear by end of the final marking");
5129 assert(_ct->klass_rem_set()->mod_union_is_clear(),
5130 "Should be clear by end of the final marking");
5131 }
5132
5133 void CMSParInitialMarkTask::work(uint worker_id) {
5134 elapsedTimer _timer;
5135 ResourceMark rm;
5136 HandleMark hm;
5137
5138 // ---------- scan from roots --------------
5139 _timer.start();
5140 GenCollectedHeap* gch = GenCollectedHeap::heap();
5141 Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5142
5143 // ---------- young gen roots --------------
5144 {
5145 work_on_young_gen_roots(worker_id, &par_mri_cl);
5146 _timer.stop();
5147 if (PrintCMSStatistics != 0) {
5148 gclog_or_tty->print_cr(
5149 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5150 worker_id, _timer.seconds());
5151 }
5152 }
5153
5154 // ---------- remaining roots --------------
5155 _timer.reset();
5156 _timer.start();
5157
5158 CLDToOopClosure cld_closure(&par_mri_cl, true);
5159
5160 gch->gen_process_roots(_collector->_cmsGen->level(),
5161 false, // yg was scanned above
5162 false, // this is parallel code
5163 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5164 _collector->should_unload_classes(),
5165 &par_mri_cl,
5166 NULL,
5167 &cld_closure);
5168 assert(_collector->should_unload_classes()
5169 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5170 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5171 _timer.stop();
5172 if (PrintCMSStatistics != 0) {
5173 gclog_or_tty->print_cr(
5174 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5175 worker_id, _timer.seconds());
5176 }
5177 }
5178
5179 // Parallel remark task
5180 class CMSParRemarkTask: public CMSParMarkTask {
5181 CompactibleFreeListSpace* _cms_space;
5182
5183 // The per-thread work queues, available here for stealing.
5184 OopTaskQueueSet* _task_queues;
5185 ParallelTaskTerminator _term;
5186
5187 public:
5276 work_queue(worker_id));
5277
5278 // Rescan young gen roots first since these are likely
5279 // coarsely partitioned and may, on that account, constitute
5280 // the critical path; thus, it's best to start off that
5281 // work first.
5282 // ---------- young gen roots --------------
5283 {
5284 work_on_young_gen_roots(worker_id, &par_mrias_cl);
5285 _timer.stop();
5286 if (PrintCMSStatistics != 0) {
5287 gclog_or_tty->print_cr(
5288 "Finished young gen rescan work in %dth thread: %3.3f sec",
5289 worker_id, _timer.seconds());
5290 }
5291 }
5292
5293 // ---------- remaining roots --------------
5294 _timer.reset();
5295 _timer.start();
5296 gch->gen_process_roots(_collector->_cmsGen->level(),
5297 false, // yg was scanned above
5298 false, // this is parallel code
5299 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5300 _collector->should_unload_classes(),
5301 &par_mrias_cl,
5302 NULL,
5303 NULL); // The dirty klasses will be handled below
5304
5305 assert(_collector->should_unload_classes()
5306 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5307 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5308 _timer.stop();
5309 if (PrintCMSStatistics != 0) {
5310 gclog_or_tty->print_cr(
5311 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5312 worker_id, _timer.seconds());
5313 }
5314
5315 // ---------- unhandled CLD scanning ----------
5316 if (worker_id == 0) { // Single threaded at the moment.
5317 _timer.reset();
5318 _timer.start();
5319
5320 // Scan all new class loader data objects and new dependencies that were
5321 // introduced during concurrent marking.
5322 ResourceMark rm;
5323 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5324 for (int i = 0; i < array->length(); i++) {
5339 // ---------- dirty klass scanning ----------
5340 if (worker_id == 0) { // Single threaded at the moment.
5341 _timer.reset();
5342 _timer.start();
5343
5344 // Scan all classes that were dirtied during the concurrent marking phase.
5345 RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5346 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5347
5348 _timer.stop();
5349 if (PrintCMSStatistics != 0) {
5350 gclog_or_tty->print_cr(
5351 "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5352 worker_id, _timer.seconds());
5353 }
5354 }
5355
5356 // We might have added oops to ClassLoaderData::_handles during the
5357 // concurrent marking phase. These oops point to newly allocated objects
5358 // that are guaranteed to be kept alive, either by the direct allocation
5359 // code or when the young collector processes the roots. Hence,
5360 // we don't have to revisit the _handles block during the remark phase.
5361
5362 // ---------- rescan dirty cards ------------
5363 _timer.reset();
5364 _timer.start();
5365
5366 // Do the rescan tasks for each of the two spaces
5367 // (cms_space) in turn.
5368 // "worker_id" is passed to select the task_queue for "worker_id"
5369 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5370 _timer.stop();
5371 if (PrintCMSStatistics != 0) {
5372 gclog_or_tty->print_cr(
5373 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5374 worker_id, _timer.seconds());
5375 }
5376
5377 // ---------- steal work from other threads ...
5378 // ---------- ... and drain overflow list.
5379 _timer.reset();
5761 // Parallel version of remark
5762 void CMSCollector::do_remark_parallel() {
5763 GenCollectedHeap* gch = GenCollectedHeap::heap();
5764 FlexibleWorkGang* workers = gch->workers();
5765 assert(workers != NULL, "Need parallel worker threads.");
5766 // Choose to use the number of GC workers most recently set
5767 // into "active_workers". If active_workers is not set, set it
5768 // to ParallelGCThreads.
5769 int n_workers = workers->active_workers();
5770 if (n_workers == 0) {
5771 assert(n_workers > 0, "Should have been set during scavenge");
5772 n_workers = ParallelGCThreads;
5773 workers->set_active_workers(n_workers);
5774 }
5775 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5776
5777 CMSParRemarkTask tsk(this,
5778 cms_space,
5779 n_workers, workers, task_queues());
5780
5781 // Set up for parallel process_roots work.
5782 gch->set_par_threads(n_workers);
5783 // We won't be iterating over the cards in the card table updating
5784 // the younger_gen cards, so we shouldn't call the following; otherwise
5785 // the verification code, as well as subsequent younger_refs_iterate
5786 // code, would get confused. XXX
5787 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5788
5789 // The young gen rescan work will not be done as part of
5790 // process_roots (which currently doesn't know how to
5791 // parallelize such a scan), but rather will be broken up into
5792 // a set of parallel tasks (via the sampling that the [abortable]
5793 // preclean phase did of EdenSpace, plus the [two] tasks of
5794 // scanning the [two] survivor spaces). Further fine-grain
5795 // parallelization of the scanning of the survivor spaces
5796 // themselves, and of precleaning of the younger gen itself
5797 // is deferred to the future.
5798 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5799
5800 // The dirty card rescan work is broken up into a "sequence"
5801 // of parallel tasks (per constituent space) that are dynamically
5802 // claimed by the parallel threads.
5803 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5804
5805 // It turns out that even when we're using 1 thread, doing the work in a
5806 // separate thread causes wide variance in run times. We can't help this
5807 // in the multi-threaded case, but we special-case n=1 here to get
5808 // repeatable measurements of the 1-thread overhead of the parallel code.
5809 if (n_workers > 1) {
5810 // Make refs discovery MT-safe, if it isn't already: it may not
5867 &markFromDirtyCardsClosure);
5868 verify_work_stacks_empty();
5869 if (PrintCMSStatistics != 0) {
5870 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5871 markFromDirtyCardsClosure.num_dirty_cards());
5872 }
5873 }
5874 }
5875 if (VerifyDuringGC &&
5876 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5877 HandleMark hm; // Discard invalid handles created during verification
5878 Universe::verify();
5879 }
5880 {
5881 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5882
5883 verify_work_stacks_empty();
5884
5885 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5886 GenCollectedHeap::StrongRootsScope srs(gch);
5887
5888 gch->gen_process_roots(_cmsGen->level(),
5889 true, // younger gens as roots
5890 false, // use the local StrongRootsScope
5891 SharedHeap::ScanningOption(roots_scanning_options()),
5892 should_unload_classes(),
5893 &mrias_cl,
5894 NULL,
5895 NULL); // The dirty klasses will be handled below
5896
5897 assert(should_unload_classes()
5898 || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5899 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5900 }
5901
5902 {
5903 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5904
5905 verify_work_stacks_empty();
5906
5907 // Scan all class loader data objects that might have been introduced
5908 // during concurrent marking.
5909 ResourceMark rm;
5910 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5911 for (int i = 0; i < array->length(); i++) {
5912 mrias_cl.do_class_loader_data(array->at(i));
5915 // We don't need to keep track of new CLDs anymore.
5916 ClassLoaderDataGraph::remember_new_clds(false);
5917
5918 verify_work_stacks_empty();
5919 }
5920
5921 {
5922 GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5923
5924 verify_work_stacks_empty();
5925
5926 RemarkKlassClosure remark_klass_closure(&mrias_cl);
5927 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5928
5929 verify_work_stacks_empty();
5930 }
5931
5932 // We might have added oops to ClassLoaderData::_handles during the
5933 // concurrent marking phase. These oops point to newly allocated objects
5934 // that are guaranteed to be kept alive, either by the direct allocation
5935 // code or when the young collector processes the roots. Hence,
5936 // we don't have to revisit the _handles block during the remark phase.
5937
5938 verify_work_stacks_empty();
5939 // Restore evacuated mark words, if any, used for overflow list links
5940 if (!CMSOverflowEarlyRestoration) {
5941 restore_preserved_marks_if_any();
5942 }
5943 verify_overflow_empty();
5944 }
5945
5946 ////////////////////////////////////////////////////////
5947 // Parallel Reference Processing Task Proxy Class
5948 ////////////////////////////////////////////////////////
5949 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5950 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5951 CMSCollector* _collector;
5952 CMSBitMap* _mark_bit_map;
5953 const MemRegion _span;
5954 ProcessTask& _task;
5955
6165
6166 if (should_unload_classes()) {
6167 {
6168 GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6169
6170 // Unload classes and purge the SystemDictionary.
6171 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6172
6173 // Unload nmethods.
6174 CodeCache::do_unloading(&_is_alive_closure, purged_class);
6175
6176 // Prune dead klasses from subklass/sibling/implementor lists.
6177 Klass::clean_weak_klass_links(&_is_alive_closure);
6178 }
6179
6180 {
6181 GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6182 // Clean up unreferenced symbols in symbol table.
6183 SymbolTable::unlink();
6184 }
6185
6186 {
6187 GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6188 // Delete entries for dead interned strings.
6189 StringTable::unlink(&_is_alive_closure);
6190 }
6191 }
6192
6193
6194 // Restore any preserved marks as a result of mark stack or
6195 // work queue overflow
6196 restore_preserved_marks_if_any(); // done single-threaded for now
6197
6198 rp->set_enqueuing_is_done(true);
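  // Enqueue the discovered references onto their pending lists, using the
  // parallel task executor when reference processing is multi-threaded and
  // the single-threaded path (NULL executor) otherwise.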
6199 if (rp->processing_is_mt()) {
6200 rp->balance_all_queues();
6201 CMSRefProcTaskExecutor task_executor(*this);
6202 rp->enqueue_discovered_references(&task_executor);
6203 } else {
6204 rp->enqueue_discovered_references(NULL);
6205 }
6206 rp->verify_no_references_recorded();
6207 assert(!rp->discovery_enabled(), "should have been disabled");
6208 }
6209
6210 #ifndef PRODUCT
6211 void CMSCollector::check_correct_thread_executing() {
6212 Thread* t = Thread::current();