void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
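
// Worked example for the fallback branch above, using the usual HotSpot
// defaults (MinHeapFreeRatio = 40, CMSTriggerRatio = 80; these values are
// assumed here for illustration, not taken from this file):
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
//                         = (60 + 32) / 100.0
//                         = 0.92
// i.e. the familiar default of starting a CMS cycle at 92% occupancy.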

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                                  // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                      // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),        // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(),    // is discovery atomic? (false: CMS discovers concurrently)
                             &_is_alive_closure);                    // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);
  }
}
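
// Note on the two MT degrees above: reference *processing* runs only inside
// stop-the-world phases, so its degree is just ParallelGCThreads, while
// reference *discovery* can happen both during concurrent marking (up to
// ConcGCThreads workers) and during STW pauses (up to ParallelGCThreads
// workers), hence the MAX2 of the two.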

AdaptiveSizePolicy* CMSCollector::size_policy() {
  return CMSHeap::heap()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
  const char* gen_name = "old";
  GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();

// ... [a long stretch of the file is elided here; the excerpt resumes in
// the middle of the eden sampling code] ...
      // initialized. So we'll instead do the check when we _use_ this sample
      // later.
      if (_eden_chunk_index == 0 ||
          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
                         _eden_chunk_array[_eden_chunk_index-1])
           >= CMSSamplingGrain)) {
        _eden_chunk_index++;  // commit sample
      }
    }
  }
  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
    size_t used = get_eden_used();
    size_t capacity = get_eden_capacity();
    assert(used <= capacity, "Unexpected state of Eden");
    if (used > (capacity / 100 * CMSScheduleRemarkEdenPenetration)) {
      _abort_preclean = true;
    }
  }
}
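
// Worked example for the abort check above (numbers assumed for
// illustration): with the default CMSScheduleRemarkEdenPenetration of 50
// and a 512 MB eden, abortable precleaning stops once eden usage exceeds
// 512M / 100 * 50 = 256 MB, so that the remark pause can be scheduled
// before eden fills up much further. Dividing capacity by 100 before
// multiplying keeps the intermediate product from overflowing size_t on
// 32-bit builds.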

size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
  assert(_collectorState == Precleaning ||
         _collectorState == AbortablePreclean, "incorrect state");
  ResourceMark rm;
  HandleMark hm;

  // Precleaning is currently not MT but the reference processor
  // may be set for MT.  Disable it temporarily here.
  ReferenceProcessor* rp = ref_processor();
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);

  // Do one pass of scrubbing the discovered reference lists
  // to remove any reference objects with strongly-reachable
  // referents.
  if (clean_refs) {
    CMSPrecleanRefsYieldClosure yield_cl(this);
    assert(rp->span().equals(_span), "Spans should be equal");
    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
                                   &_markStack, true /* preclean */);
    CMSDrainMarkingStackClosure complete_trace(this,
                                   _span, &_markBitMap, &_markStack,
                                   &keep_alive, true /* preclean */);

    // We don't want this step to interfere with a young
    // collection because we don't want to take CPU
    // or memory bandwidth away from the young GC threads
    // (which may be as many as there are CPUs).
    // Note that we don't need to protect ourselves from
    // interference with mutators because they can't
    // ... [preclean_work() continues; another long stretch of the file is
    // elided here] ...
                              _collector.ref_processor()->span(),
                              _collector.markBitMap(),
                              workers, _collector.task_queues());
  workers->run_task(&rp_task);
}

void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  CMSHeap* heap = CMSHeap::heap();
  WorkGang* workers = heap->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  CMSRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}
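
// Both execute() overloads follow the same pattern: wrap the reference
// processor's task in a gang-task proxy and hand it to the heap's WorkGang,
// which runs it on the parallel GC worker threads.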

void CMSCollector::refProcessingWork() {
  ResourceMark rm;
  HandleMark hm;

  ReferenceProcessor* rp = ref_processor();
  assert(rp->span().equals(_span), "Spans should be equal");
  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  // Process weak references.
  rp->setup_policy(false);  // false: do not unconditionally clear soft refs
  verify_work_stacks_empty();

  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);

    // Setup keep_alive and complete closures.
    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                            &_markStack, false /* !preclean */);
    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                  _span, &_markBitMap, &_markStack,
                                  &cmsKeepAliveClosure, false /* !preclean */);

    ReferenceProcessorStats stats;
    if (rp->processing_is_mt()) {
      // Set the degree of MT here.  If the discovery is done MT, there
      // ... [refProcessingWork() continues; excerpt ends here] ...