void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                             / 100.0;
  }
}
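// Worked example (assuming the JDK defaults MinHeapFreeRatio = 40 and
// CMSTriggerRatio = 80): when io < 0 (CMSInitiatingOccupancyFraction left at
// its default of -1), the else-branch yields
//   ((100 - 40) + 80 * 40 / 100) / 100 = (60 + 32) / 100 = 0.92,
// i.e. a CMS cycle starts once the old generation is ~92% occupied. Passing
// io = 75 instead sets the threshold directly to 0.75.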

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(&_span_discoverer,                // span-based discoverer
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),  // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic (false for CMS)
                             &_is_alive_closure);              // closure for liveness info
    // Initialize the _ref_processor field of the CMS generation.
    _cmsGen->set_ref_processor(_ref_processor);
  }
}
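// Worked example (hypothetical flag values): with ParallelGCThreads = 4,
// ConcGCThreads = 2 and ParallelRefProcEnabled, the processor is created with
// MT processing at degree 4 and, if refs_discovery_is_mt(), MT discovery at
// degree MAX2(2, 4) = 4. The discovery degree takes the max, presumably
// because references may be discovered both by the concurrent marking threads
// (ConcGCThreads) and by the parallel workers during a stop-the-world remark.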

AdaptiveSizePolicy* CMSCollector::size_policy() {
  return CMSHeap::heap()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
  const char* gen_name = "old";
  GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();

// ...

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->cr();
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
  CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS* ct,
                           ConcurrentMarkSweepPolicy* cp):
  _cmsGen(cmsGen),
  // Adjust span to cover old (cms) gen
  _span(cmsGen->reserved()),
  _ct(ct),
  _span_discoverer(_span),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTable::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // Verify that this lock should be acquired with a safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
// ...
      // initialized. So we'll instead do the check when we _use_ this sample
      // later.
      if (_eden_chunk_index == 0 ||
          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
                         _eden_chunk_array[_eden_chunk_index-1])
           >= CMSSamplingGrain)) {
        _eden_chunk_index++;  // commit sample
      }
    }
  }
  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
    size_t used = get_eden_used();
    size_t capacity = get_eden_capacity();
    assert(used <= capacity, "Unexpected state of Eden");
    if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
      _abort_preclean = true;
    }
  }
}
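
// Worked example (assuming the default CMSScheduleRemarkEdenPenetration of 50):
// with a 256M eden the abort threshold is 256M / 100 * 50 = 128M, so abortable
// precleaning stops once eden is more than half full, leaving room to schedule
// the remark pause before eden fills completely. Note that the capacity is
// divided by 100 first, which avoids overflow of the intermediate product on
// 32-bit builds (at the cost of some rounding).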

size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
  assert(_collectorState == Precleaning ||
         _collectorState == AbortablePreclean, "incorrect state");
  ResourceMark rm;
  HandleMark hm;

  // Precleaning is currently not MT but the reference processor
  // may be set for MT. Temporarily disable MT discovery here.
  ReferenceProcessor* rp = ref_processor();
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
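
  // A minimal sketch of the RAII mutator idiom relied on above (the real class
  // is declared in referenceProcessor.hpp; this rendition is illustrative):
  //
  //   class ReferenceProcessorMTDiscoveryMutator : StackObj {
  //     ReferenceProcessor* _rp;
  //     bool                _saved_mt;
  //    public:
  //     ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp, bool mt)
  //       : _rp(rp), _saved_mt(rp->discovery_is_mt()) {
  //       _rp->set_mt_discovery(mt);          // install the temporary setting
  //     }
  //     ~ReferenceProcessorMTDiscoveryMutator() {
  //       _rp->set_mt_discovery(_saved_mt);   // restore on scope exit
  //     }
  //   };
  //
  // MT discovery is thus re-enabled automatically when preclean_work() returns.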

  // Do one pass of scrubbing the discovered reference lists
  // to remove any reference objects with strongly-reachable
  // referents.
  if (clean_refs) {
    CMSPrecleanRefsYieldClosure yield_cl(this);
    assert(_span_discoverer.span().equals(_span), "Spans should be equal");
    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
                                   &_markStack, true /* preclean */);
    CMSDrainMarkingStackClosure complete_trace(this,
                                   _span, &_markBitMap, &_markStack,
                                   &keep_alive, true /* preclean */);

    // We don't want this step to interfere with a young
    // collection because we don't want to take CPU
    // or memory bandwidth away from the young GC threads
    // (which may be as many as there are CPUs).
    // Note that we don't need to protect ourselves from
    // interference with mutators because they can't
    // manipulate the discovered reference lists nor affect
    // the computed reachability of the referents, which are
    // the only properties manipulated by the precleaning
    // of these reference lists.
    stopTimer();
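    // CMSTokenSyncWithLocks is an RAII guard: for the lifetime of 'x' below it
    // holds the CMS token together with the given lock(s), keeping this step
    // from racing with a foreground collection, and releases them on scope
    // exit. The timer is stopped around the acquisition so that lock-wait
    // time is not charged to the precleaning phase itself.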
    CMSTokenSyncWithLocks x(true /* is cms thread */,
                            bitMapLock());
    startTimer();
// ...
    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(keep_alive);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
      break; // nirvana from the infinite cycle
    }
  }
  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
}
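
// A note on the handshake above: offer_termination() returns true only once
// every worker in the gang has offered to terminate and no stealable work
// remains, at which point all workers exit their loops together; if work
// reappears first it returns false and the caller resumes stealing.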

void CMSRefProcTaskExecutor::execute(ProcessTask& task)
{
  CMSHeap* heap = CMSHeap::heap();
  WorkGang* workers = heap->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  CMSRefProcTaskProxy rp_task(task, &_collector,
                              _collector.ref_processor_span(),
                              _collector.markBitMap(),
                              workers, _collector.task_queues());
  workers->run_task(&rp_task);
}
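
// How this adapter gets invoked, sketched under the usual API (a typical call
// site, not a quote from this file): the collector hands the executor to the
// ReferenceProcessor, which calls back into execute() for each parallel phase:
//
//   CMSRefProcTaskExecutor task_executor(*this);
//   rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
//                                     &task_executor, &pt);
//
// CMSRefProcTaskProxy is the AbstractGangTask wrapper that lets each GC worker
// run the ProcessTask's work(worker_id, ...) over its share of the discovered
// reference lists.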

void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  CMSHeap* heap = CMSHeap::heap();
  WorkGang* workers = heap->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  CMSRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void CMSCollector::refProcessingWork() {
  ResourceMark rm;
  HandleMark hm;

  ReferenceProcessor* rp = ref_processor();
  assert(_span_discoverer.span().equals(_span), "Spans should be equal");
  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  // Process weak references.
  rp->setup_policy(false);
  verify_work_stacks_empty();

  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);

    // Set up keep_alive and complete closures.
    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                            &_markStack, false /* !preclean */);
    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                  _span, &_markBitMap, &_markStack,
                                  &cmsKeepAliveClosure, false /* !preclean */);

    ReferenceProcessorStats stats;
    if (rp->processing_is_mt()) {
      // Set the degree of MT here. If the discovery is done MT, there
      // may have been a different number of threads doing the discovery