< prev index next >

src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

Print this page
rev 49912 : imported patch 8201492-properly-implement-non-contiguous-reference-processing
rev 49913 : imported patch 8201492-stefanj-review
rev 49914 : [mq]: 8201492-kim-review


     // Compute the heap-occupancy fraction at which a concurrent collection
     // cycle should be initiated, storing the result in _initiating_occupancy
     // as a fraction in [0, 1].
     //   io - initiating occupancy percentage; a negative value means
     //        "not explicitly set", in which case the value is derived
     //        from MinHeapFreeRatio and tr instead.
     //   tr - trigger ratio percentage; only used when io < 0.
 273 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 274   assert(io <= 100 && tr <= 100, "Check the arguments");
 275   if (io >= 0) {
 276     _initiating_occupancy = (double)io / 100.0;
 277   } else {
         // io < 0: start from the maximum allowed occupancy
         // (100 - MinHeapFreeRatio) and add back a tr-percent share of
         // MinHeapFreeRatio, then convert the percentage to a fraction.
 278     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 279                              (double)(tr * MinHeapFreeRatio) / 100.0)
 280                             / 100.0;
 281   }
 282 }
 283 
     // Reference-processor initialization is delegated to the associated
     // CMSCollector, which creates the shared ReferenceProcessor.
 284 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 285   assert(collector() != NULL, "no collector");
 286   collector()->ref_processor_init();
 287 }
 288 
     // Lazily create the ReferenceProcessor used by this collector and
     // publish it to the CMS generation.  A no-op on subsequent calls.
 289 void CMSCollector::ref_processor_init() {
 290   if (_ref_processor == NULL) {
 291     // Allocate and initialize a reference processor
 292     _ref_processor =
 293       new ReferenceProcessor(&_span_discoverer,                      // span
 294                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 295                              ParallelGCThreads,                      // mt processing degree
 296                              _cmsGen->refs_discovery_is_mt(),        // mt discovery
 297                              MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 298                              _cmsGen->refs_discovery_is_atomic(),    // whether discovery is atomic
 299                              &_is_alive_closure);                    // closure for liveness info
 300     // Initialize the _ref_processor field of CMSGen
 301     _cmsGen->set_ref_processor(_ref_processor);
 302 
 303   }
 304 }
 305 
     // The adaptive size policy is owned by the heap; forward to it.
 306 AdaptiveSizePolicy* CMSCollector::size_policy() {
 307   return CMSHeap::heap()->size_policy();
 308 }
 309 
 310 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 311 
 312   const char* gen_name = "old";
 313   GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();


 431               promotion_rate(), cms_allocation_rate());
 432     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 433               cms_consumption_rate(), time_until_cms_gen_full());
 434   }
 435   st->cr();
 436 }
 437 #endif // #ifndef PRODUCT
 438 
     // Definitions of CMSCollector's static state: the collector starts out
     // Idling, with no foreground GC active and none waiting.
 439 CMSCollector::CollectorState CMSCollector::_collectorState =
 440                              CMSCollector::Idling;
 441 bool CMSCollector::_foregroundGCIsActive = false;
 442 bool CMSCollector::_foregroundGCShouldWait = false;
 443 
 444 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 445                            CardTableRS*                   ct,
 446                            ConcurrentMarkSweepPolicy*     cp):
 447   _cmsGen(cmsGen),
 448   // Adjust span to cover old (cms) gen
 449   _span(cmsGen->reserved()),
 450   _ct(ct),
 451   _span_discoverer(_span),
 452   _ref_processor(NULL),    // will be set later
 453   _conc_workers(NULL),     // may be set later
 454   _abort_preclean(false),
 455   _start_sampling(false),
 456   _between_prologue_and_epilogue(false),
 457   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 458   _modUnionTable((CardTable::card_shift - LogHeapWordSize),
 459                  -1 /* lock-free */, "No_lock" /* dummy */),
 460   _modUnionClosurePar(&_modUnionTable),
 461   // Construct the is_alive_closure with _span & markBitMap
 462   _is_alive_closure(_span, &_markBitMap),
 463   _restart_addr(NULL),
 464   _overflow_list(NULL),
 465   _stats(cmsGen),
 466   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
 467                              //verify that this lock should be acquired with safepoint check.
 468                              Monitor::_safepoint_check_sometimes)),
 469   _eden_chunk_array(NULL),     // may be set in ctor body
 470   _eden_chunk_capacity(0),     // -- ditto --
 471   _eden_chunk_index(0),        // -- ditto --


3744     }
3745   }
3746 }
3747 
3748 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3749   assert(_collectorState == Precleaning ||
3750          _collectorState == AbortablePreclean, "incorrect state");
3751   ResourceMark rm;
3752   HandleMark   hm;
3753 
3754   // Precleaning is currently not MT but the reference processor
3755   // may be set for MT.  Disable it temporarily here.
3756   ReferenceProcessor* rp = ref_processor();
3757   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3758 
3759   // Do one pass of scrubbing the discovered reference lists
3760   // to remove any reference objects with strongly-reachable
3761   // referents.
3762   if (clean_refs) {
3763     CMSPrecleanRefsYieldClosure yield_cl(this);
3764     assert(_span_discoverer.span().equals(_span), "Spans should be equal");
3765     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3766                                    &_markStack, true /* preclean */);
3767     CMSDrainMarkingStackClosure complete_trace(this,
3768                                    _span, &_markBitMap, &_markStack,
3769                                    &keep_alive, true /* preclean */);
3770 
3771     // We don't want this step to interfere with a young
3772     // collection because we don't want to take CPU
3773     // or memory bandwidth away from the young GC threads
3774     // (which may be as many as there are CPUs).
3775     // Note that we don't need to protect ourselves from
3776     // interference with mutators because they can't
3777     // manipulate the discovered reference lists nor affect
3778     // the computed reachability of the referents, the
3779     // only properties manipulated by the precleaning
3780     // of these reference lists.
3781     stopTimer();
3782     CMSTokenSyncWithLocks x(true /* is cms thread */,
3783                             bitMapLock());
3784     startTimer();


5157                               _collector.markBitMap(),
5158                               workers, _collector.task_queues());
5159   workers->run_task(&rp_task);
5160 }
5161 
     // Run the given reference-enqueue task on the heap's parallel worker
     // threads, wrapped in a CMSRefEnqueueTaskProxy.
 5162 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
 5163 {
 5164 
 5165   CMSHeap* heap = CMSHeap::heap();
 5166   WorkGang* workers = heap->workers();
 5167   assert(workers != NULL, "Need parallel worker threads.");
 5168   CMSRefEnqueueTaskProxy enq_task(task);
 5169   workers->run_task(&enq_task);
 5170 }
5171 
5172 void CMSCollector::refProcessingWork() {
5173   ResourceMark rm;
5174   HandleMark   hm;
5175 
5176   ReferenceProcessor* rp = ref_processor();
5177   assert(_span_discoverer.span().equals(_span), "Spans should be equal");
5178   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5179   // Process weak references.
5180   rp->setup_policy(false);
5181   verify_work_stacks_empty();
5182 
5183   ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
5184   {
5185     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
5186 
5187     // Setup keep_alive and complete closures.
5188     CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5189                                             &_markStack, false /* !preclean */);
5190     CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5191                                   _span, &_markBitMap, &_markStack,
5192                                   &cmsKeepAliveClosure, false /* !preclean */);
5193 
5194     ReferenceProcessorStats stats;
5195     if (rp->processing_is_mt()) {
5196       // Set the degree of MT here.  If the discovery is done MT, there
5197       // may have been a different number of threads doing the discovery




     // Compute the heap-occupancy fraction at which a concurrent collection
     // cycle should be initiated, storing the result in _initiating_occupancy
     // as a fraction in [0, 1].
     //   io - initiating occupancy percentage; a negative value means
     //        "not explicitly set", in which case the value is derived
     //        from MinHeapFreeRatio and tr instead.
     //   tr - trigger ratio percentage; only used when io < 0.
 273 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 274   assert(io <= 100 && tr <= 100, "Check the arguments");
 275   if (io >= 0) {
 276     _initiating_occupancy = (double)io / 100.0;
 277   } else {
         // io < 0: start from the maximum allowed occupancy
         // (100 - MinHeapFreeRatio) and add back a tr-percent share of
         // MinHeapFreeRatio, then convert the percentage to a fraction.
 278     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 279                              (double)(tr * MinHeapFreeRatio) / 100.0)
 280                             / 100.0;
 281   }
 282 }
 283 
     // Reference-processor initialization is delegated to the associated
     // CMSCollector, which creates the shared ReferenceProcessor.
 284 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 285   assert(collector() != NULL, "no collector");
 286   collector()->ref_processor_init();
 287 }
 288 
     // Lazily create the ReferenceProcessor used by this collector and
     // publish it to the CMS generation.  A no-op on subsequent calls.
 289 void CMSCollector::ref_processor_init() {
 290   if (_ref_processor == NULL) {
 291     // Allocate and initialize a reference processor
 292     _ref_processor =
 293       new ReferenceProcessor(&_span_based_discoverer,                // span
 294                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 295                              ParallelGCThreads,                      // mt processing degree
 296                              _cmsGen->refs_discovery_is_mt(),        // mt discovery
 297                              MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 298                              _cmsGen->refs_discovery_is_atomic(),    // whether discovery is atomic
 299                              &_is_alive_closure);                    // closure for liveness info
 300     // Initialize the _ref_processor field of CMSGen
 301     _cmsGen->set_ref_processor(_ref_processor);
 302 
 303   }
 304 }
 305 
     // The adaptive size policy is owned by the heap; forward to it.
 306 AdaptiveSizePolicy* CMSCollector::size_policy() {
 307   return CMSHeap::heap()->size_policy();
 308 }
 309 
 310 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 311 
 312   const char* gen_name = "old";
 313   GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();


 431               promotion_rate(), cms_allocation_rate());
 432     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 433               cms_consumption_rate(), time_until_cms_gen_full());
 434   }
 435   st->cr();
 436 }
 437 #endif // #ifndef PRODUCT
 438 
     // Definitions of CMSCollector's static state: the collector starts out
     // Idling, with no foreground GC active and none waiting.
 439 CMSCollector::CollectorState CMSCollector::_collectorState =
 440                              CMSCollector::Idling;
 441 bool CMSCollector::_foregroundGCIsActive = false;
 442 bool CMSCollector::_foregroundGCShouldWait = false;
 443 
 444 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 445                            CardTableRS*                   ct,
 446                            ConcurrentMarkSweepPolicy*     cp):
 447   _cmsGen(cmsGen),
 448   // Adjust span to cover old (cms) gen
 449   _span(cmsGen->reserved()),
 450   _ct(ct),
 451   _span_based_discoverer(_span),
 452   _ref_processor(NULL),    // will be set later
 453   _conc_workers(NULL),     // may be set later
 454   _abort_preclean(false),
 455   _start_sampling(false),
 456   _between_prologue_and_epilogue(false),
 457   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 458   _modUnionTable((CardTable::card_shift - LogHeapWordSize),
 459                  -1 /* lock-free */, "No_lock" /* dummy */),
 460   _modUnionClosurePar(&_modUnionTable),
 461   // Construct the is_alive_closure with _span & markBitMap
 462   _is_alive_closure(_span, &_markBitMap),
 463   _restart_addr(NULL),
 464   _overflow_list(NULL),
 465   _stats(cmsGen),
 466   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
 467                              //verify that this lock should be acquired with safepoint check.
 468                              Monitor::_safepoint_check_sometimes)),
 469   _eden_chunk_array(NULL),     // may be set in ctor body
 470   _eden_chunk_capacity(0),     // -- ditto --
 471   _eden_chunk_index(0),        // -- ditto --


3744     }
3745   }
3746 }
3747 
3748 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3749   assert(_collectorState == Precleaning ||
3750          _collectorState == AbortablePreclean, "incorrect state");
3751   ResourceMark rm;
3752   HandleMark   hm;
3753 
3754   // Precleaning is currently not MT but the reference processor
3755   // may be set for MT.  Disable it temporarily here.
3756   ReferenceProcessor* rp = ref_processor();
3757   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3758 
3759   // Do one pass of scrubbing the discovered reference lists
3760   // to remove any reference objects with strongly-reachable
3761   // referents.
3762   if (clean_refs) {
3763     CMSPrecleanRefsYieldClosure yield_cl(this);
3764     assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
3765     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3766                                    &_markStack, true /* preclean */);
3767     CMSDrainMarkingStackClosure complete_trace(this,
3768                                    _span, &_markBitMap, &_markStack,
3769                                    &keep_alive, true /* preclean */);
3770 
3771     // We don't want this step to interfere with a young
3772     // collection because we don't want to take CPU
3773     // or memory bandwidth away from the young GC threads
3774     // (which may be as many as there are CPUs).
3775     // Note that we don't need to protect ourselves from
3776     // interference with mutators because they can't
3777     // manipulate the discovered reference lists nor affect
3778     // the computed reachability of the referents, the
3779     // only properties manipulated by the precleaning
3780     // of these reference lists.
3781     stopTimer();
3782     CMSTokenSyncWithLocks x(true /* is cms thread */,
3783                             bitMapLock());
3784     startTimer();


5157                               _collector.markBitMap(),
5158                               workers, _collector.task_queues());
5159   workers->run_task(&rp_task);
5160 }
5161 
     // Run the given reference-enqueue task on the heap's parallel worker
     // threads, wrapped in a CMSRefEnqueueTaskProxy.
 5162 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
 5163 {
 5164 
 5165   CMSHeap* heap = CMSHeap::heap();
 5166   WorkGang* workers = heap->workers();
 5167   assert(workers != NULL, "Need parallel worker threads.");
 5168   CMSRefEnqueueTaskProxy enq_task(task);
 5169   workers->run_task(&enq_task);
 5170 }
5171 
5172 void CMSCollector::refProcessingWork() {
5173   ResourceMark rm;
5174   HandleMark   hm;
5175 
5176   ReferenceProcessor* rp = ref_processor();
5177   assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
5178   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5179   // Process weak references.
5180   rp->setup_policy(false);
5181   verify_work_stacks_empty();
5182 
5183   ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
5184   {
5185     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
5186 
5187     // Setup keep_alive and complete closures.
5188     CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5189                                             &_markStack, false /* !preclean */);
5190     CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5191                                   _span, &_markBitMap, &_markStack,
5192                                   &cmsKeepAliveClosure, false /* !preclean */);
5193 
5194     ReferenceProcessorStats stats;
5195     if (rp->processing_is_mt()) {
5196       // Set the degree of MT here.  If the discovery is done MT, there
5197       // may have been a different number of threads doing the discovery


< prev index next >