36 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
37 #include "gc_implementation/parNew/parNewGeneration.hpp"
38 #include "gc_implementation/shared/collectorCounters.hpp"
39 #include "gc_implementation/shared/gcTimer.hpp"
40 #include "gc_implementation/shared/gcTrace.hpp"
41 #include "gc_implementation/shared/gcTraceTime.hpp"
42 #include "gc_implementation/shared/isGCActiveMark.hpp"
43 #include "gc_interface/collectedHeap.inline.hpp"
44 #include "memory/allocation.hpp"
45 #include "memory/cardGeneration.inline.hpp"
46 #include "memory/cardTableRS.hpp"
47 #include "memory/collectorPolicy.hpp"
48 #include "memory/gcLocker.inline.hpp"
49 #include "memory/genCollectedHeap.hpp"
50 #include "memory/genMarkSweep.hpp"
51 #include "memory/genOopClosures.inline.hpp"
52 #include "memory/iterator.inline.hpp"
53 #include "memory/padded.hpp"
54 #include "memory/referencePolicy.hpp"
55 #include "memory/resourceArea.hpp"
56 #include "memory/tenuredGeneration.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "prims/jvmtiExport.hpp"
59 #include "runtime/atomic.inline.hpp"
60 #include "runtime/globals_extension.hpp"
61 #include "runtime/handles.inline.hpp"
62 #include "runtime/java.hpp"
63 #include "runtime/orderAccess.inline.hpp"
64 #include "runtime/vmThread.hpp"
65 #include "services/memoryService.hpp"
66 #include "services/runtimeService.hpp"
67
68 // statics
69 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;  // class-wide collector instance; NULL until installed elsewhere in this file
70 bool CMSCollector::_full_gc_requested = false;                    // pending full-gc request flag; false when no request is outstanding
71 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;    // cause paired with _full_gc_requested; _no_gc when no request is pending
72
73 //////////////////////////////////////////////////////////////////
74 // In support of CMS/VM thread synchronization
75 //////////////////////////////////////////////////////////////////
3054
3055 // Whenever a CLD is found, it will be claimed before proceeding to mark
3056 // the klasses. The claimed marks need to be cleared before marking starts.
3057 ClassLoaderDataGraph::clear_claimed_marks();
3058
3059 if (CMSPrintEdenSurvivorChunks) {
3060 print_eden_and_survivor_chunk_arrays();
3061 }
3062
3063 {
3064 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3065 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3066 // The parallel version.
3067 FlexibleWorkGang* workers = gch->workers();
3068 assert(workers != NULL, "Need parallel worker threads.");
3069 int n_workers = workers->active_workers();
3070 CMSParInitialMarkTask tsk(this, n_workers);
3071 gch->set_par_threads(n_workers);
3072 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3073 if (n_workers > 1) {
3074 GenCollectedHeap::StrongRootsScope srs;
3075 workers->run_task(&tsk);
3076 } else {
3077 GenCollectedHeap::StrongRootsScope srs;
3078 tsk.work(0);
3079 }
3080 gch->set_par_threads(0);
3081 } else {
3082 // The serial version.
3083       CLDToOopClosure cld_closure(&notOlder, true);
3084 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3085 gch->gen_process_roots(_cmsGen->level(),
3086 true, // younger gens are roots
3087 true, // activate StrongRootsScope
3088 GenCollectedHeap::ScanningOption(roots_scanning_options()),
3089 should_unload_classes(),
3090                              &notOlder,
3091 NULL,
3092 &cld_closure);
3093 }
3094 }
3095
3096 // Clear mod-union table; it will be dirtied in the prologue of
3097 // CMS generation per each younger generation collection.
5152 // scanning the [two] survivor spaces. Further fine-grain
5153 // parallelization of the scanning of the survivor spaces
5154 // themselves, and of precleaning of the younger gen itself
5155 // is deferred to the future.
5156 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5157
5158 // The dirty card rescan work is broken up into a "sequence"
5159 // of parallel tasks (per constituent space) that are dynamically
5160 // claimed by the parallel threads.
5161 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5162
5163 // It turns out that even when we're using 1 thread, doing the work in a
5164 // separate thread causes wide variance in run times. We can't help this
5165 // in the multi-threaded case, but we special-case n=1 here to get
5166 // repeatable measurements of the 1-thread overhead of the parallel code.
5167 if (n_workers > 1) {
5168 // Make refs discovery MT-safe, if it isn't already: it may not
5169 // necessarily be so, since it's possible that we are doing
5170 // ST marking.
5171 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5172 GenCollectedHeap::StrongRootsScope srs;
5173 workers->run_task(&tsk);
5174 } else {
5175 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5176 GenCollectedHeap::StrongRootsScope srs;
5177 tsk.work(0);
5178 }
5179
5180 gch->set_par_threads(0); // 0 ==> non-parallel.
5181 // restore, single-threaded for now, any preserved marks
5182 // as a result of work_q overflow
5183 restore_preserved_marks_if_any();
5184 }
5185
5186 // Non-parallel version of remark
5187 void CMSCollector::do_remark_non_parallel() {
5188 ResourceMark rm;
5189 HandleMark hm;
5190 GenCollectedHeap* gch = GenCollectedHeap::heap();
5191 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5192
5193 MarkRefsIntoAndScanClosure
5194 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5195 &_markStack, this,
5196 false /* should_yield */, false /* not precleaning */);
5224 _modUnionTable.dirty_range_iterate_clear(cms_span,
5225 &markFromDirtyCardsClosure);
5226 verify_work_stacks_empty();
5227 if (PrintCMSStatistics != 0) {
5228 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5229 markFromDirtyCardsClosure.num_dirty_cards());
5230 }
5231 }
5232 }
5233 if (VerifyDuringGC &&
5234 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5235 HandleMark hm; // Discard invalid handles created during verification
5236 Universe::verify();
5237 }
5238 {
5239 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5240
5241 verify_work_stacks_empty();
5242
5243 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5244 GenCollectedHeap::StrongRootsScope srs;
5245
5246 gch->gen_process_roots(_cmsGen->level(),
5247 true, // younger gens as roots
5248 false, // use the local StrongRootsScope
5249 GenCollectedHeap::ScanningOption(roots_scanning_options()),
5250 should_unload_classes(),
5251 &mrias_cl,
5252 NULL,
5253 NULL); // The dirty klasses will be handled below
5254
5255 assert(should_unload_classes()
5256 || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5257 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5258 }
5259
5260 {
5261 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5262
5263 verify_work_stacks_empty();
5264
|
36 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
37 #include "gc_implementation/parNew/parNewGeneration.hpp"
38 #include "gc_implementation/shared/collectorCounters.hpp"
39 #include "gc_implementation/shared/gcTimer.hpp"
40 #include "gc_implementation/shared/gcTrace.hpp"
41 #include "gc_implementation/shared/gcTraceTime.hpp"
42 #include "gc_implementation/shared/isGCActiveMark.hpp"
43 #include "gc_interface/collectedHeap.inline.hpp"
44 #include "memory/allocation.hpp"
45 #include "memory/cardGeneration.inline.hpp"
46 #include "memory/cardTableRS.hpp"
47 #include "memory/collectorPolicy.hpp"
48 #include "memory/gcLocker.inline.hpp"
49 #include "memory/genCollectedHeap.hpp"
50 #include "memory/genMarkSweep.hpp"
51 #include "memory/genOopClosures.inline.hpp"
52 #include "memory/iterator.inline.hpp"
53 #include "memory/padded.hpp"
54 #include "memory/referencePolicy.hpp"
55 #include "memory/resourceArea.hpp"
56 #include "memory/strongRootsScope.hpp"
57 #include "memory/tenuredGeneration.hpp"
58 #include "oops/oop.inline.hpp"
59 #include "prims/jvmtiExport.hpp"
60 #include "runtime/atomic.inline.hpp"
61 #include "runtime/globals_extension.hpp"
62 #include "runtime/handles.inline.hpp"
63 #include "runtime/java.hpp"
64 #include "runtime/orderAccess.inline.hpp"
65 #include "runtime/vmThread.hpp"
66 #include "services/memoryService.hpp"
67 #include "services/runtimeService.hpp"
68
69 // statics
70 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;  // class-wide collector instance; NULL until installed elsewhere in this file
71 bool CMSCollector::_full_gc_requested = false;                    // pending full-gc request flag; false when no request is outstanding
72 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;    // cause paired with _full_gc_requested; _no_gc when no request is pending
73
74 //////////////////////////////////////////////////////////////////
75 // In support of CMS/VM thread synchronization
76 //////////////////////////////////////////////////////////////////
3055
3056 // Whenever a CLD is found, it will be claimed before proceeding to mark
3057 // the klasses. The claimed marks need to be cleared before marking starts.
3058 ClassLoaderDataGraph::clear_claimed_marks();
3059
3060 if (CMSPrintEdenSurvivorChunks) {
3061 print_eden_and_survivor_chunk_arrays();
3062 }
3063
3064 {
3065 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3066 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3067 // The parallel version.
3068 FlexibleWorkGang* workers = gch->workers();
3069 assert(workers != NULL, "Need parallel worker threads.");
3070 int n_workers = workers->active_workers();
3071 CMSParInitialMarkTask tsk(this, n_workers);
3072 gch->set_par_threads(n_workers);
3073 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3074 if (n_workers > 1) {
3075 StrongRootsScope srs;
3076 workers->run_task(&tsk);
3077 } else {
3078 StrongRootsScope srs;
3079 tsk.work(0);
3080 }
3081 gch->set_par_threads(0);
3082 } else {
3083 // The serial version.
3084       CLDToOopClosure cld_closure(&notOlder, true);
3085 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3086 gch->gen_process_roots(_cmsGen->level(),
3087 true, // younger gens are roots
3088 true, // activate StrongRootsScope
3089 GenCollectedHeap::ScanningOption(roots_scanning_options()),
3090 should_unload_classes(),
3091                              &notOlder,
3092 NULL,
3093 &cld_closure);
3094 }
3095 }
3096
3097 // Clear mod-union table; it will be dirtied in the prologue of
3098 // CMS generation per each younger generation collection.
5153 // scanning the [two] survivor spaces. Further fine-grain
5154 // parallelization of the scanning of the survivor spaces
5155 // themselves, and of precleaning of the younger gen itself
5156 // is deferred to the future.
5157 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5158
5159 // The dirty card rescan work is broken up into a "sequence"
5160 // of parallel tasks (per constituent space) that are dynamically
5161 // claimed by the parallel threads.
5162 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5163
5164 // It turns out that even when we're using 1 thread, doing the work in a
5165 // separate thread causes wide variance in run times. We can't help this
5166 // in the multi-threaded case, but we special-case n=1 here to get
5167 // repeatable measurements of the 1-thread overhead of the parallel code.
5168 if (n_workers > 1) {
5169 // Make refs discovery MT-safe, if it isn't already: it may not
5170 // necessarily be so, since it's possible that we are doing
5171 // ST marking.
5172 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5173 StrongRootsScope srs;
5174 workers->run_task(&tsk);
5175 } else {
5176 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5177 StrongRootsScope srs;
5178 tsk.work(0);
5179 }
5180
5181 gch->set_par_threads(0); // 0 ==> non-parallel.
5182 // restore, single-threaded for now, any preserved marks
5183 // as a result of work_q overflow
5184 restore_preserved_marks_if_any();
5185 }
5186
5187 // Non-parallel version of remark
5188 void CMSCollector::do_remark_non_parallel() {
5189 ResourceMark rm;
5190 HandleMark hm;
5191 GenCollectedHeap* gch = GenCollectedHeap::heap();
5192 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5193
5194 MarkRefsIntoAndScanClosure
5195 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5196 &_markStack, this,
5197 false /* should_yield */, false /* not precleaning */);
5225 _modUnionTable.dirty_range_iterate_clear(cms_span,
5226 &markFromDirtyCardsClosure);
5227 verify_work_stacks_empty();
5228 if (PrintCMSStatistics != 0) {
5229 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5230 markFromDirtyCardsClosure.num_dirty_cards());
5231 }
5232 }
5233 }
5234 if (VerifyDuringGC &&
5235 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5236 HandleMark hm; // Discard invalid handles created during verification
5237 Universe::verify();
5238 }
5239 {
5240 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5241
5242 verify_work_stacks_empty();
5243
5244 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5245 StrongRootsScope srs;
5246
5247 gch->gen_process_roots(_cmsGen->level(),
5248 true, // younger gens as roots
5249 false, // use the local StrongRootsScope
5250 GenCollectedHeap::ScanningOption(roots_scanning_options()),
5251 should_unload_classes(),
5252 &mrias_cl,
5253 NULL,
5254 NULL); // The dirty klasses will be handled below
5255
5256 assert(should_unload_classes()
5257 || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5258 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5259 }
5260
5261 {
5262 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5263
5264 verify_work_stacks_empty();
5265
|