609
610 // Support for parallelizing survivor space rescan
611 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
612 const size_t max_plab_samples =
613 _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
614
615 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
616 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
617 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
618 _survivor_chunk_capacity = max_plab_samples;
619 for (uint i = 0; i < ParallelGCThreads; i++) {
620 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
621 ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
622 assert(cur->end() == 0, "Should be 0");
623 assert(cur->array() == vec, "Should be vec");
624 assert(cur->capacity() == max_plab_samples, "Error");
625 }
626 }
627
628 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
629 _gc_counters = new CollectorCounters("CMS", 1);
630 _completed_initialization = true;
631 _inter_sweep_timer.start(); // start of time
632 }
633
634 const char* ConcurrentMarkSweepGeneration::name() const {
635 return "concurrent mark-sweep generation";
636 }
637 void ConcurrentMarkSweepGeneration::update_counters() {
638 if (UsePerfData) {
639 _space_counters->update_all();
640 _gen_counters->update_all();
641 }
642 }
643
644 // this is an optimized version of update_counters(). it takes the
645 // used value as a parameter rather than computing it.
646 //
647 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
648 if (UsePerfData) {
649 _space_counters->update_used(used);
5533 size_policy()->reset_gc_overhead_limit_count();
5534 _collectorState = Idling;
5535 }
5536
5537 register_gc_end();
5538 }
5539
// Same as above but for STW paths
// Clears the entire mark bitmap, returns the collector state machine from
// Resetting to Idling, and records the end of the GC cycle.
// Preconditions (checked below): the collector is in the Resetting state
// and the caller already holds the bitmap lock.
void CMSCollector::reset_stw() {
  // already have the lock
  assert(_collectorState == Resetting, "just checking");
  assert_lock_strong(bitMapLock());
  // NOTE(review): presumably restores the CMS thread's GC id for the scope
  // below so logged events are attributed to the concurrent cycle — confirm.
  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
  _markBitMap.clear_all();
  _collectorState = Idling;
  register_gc_end();
}
5550
// Dispatches one of the two CMS pause operations: the initial-mark
// checkpoint or the final-remark checkpoint. Unknown op values are fatal.
// NOTE(review): gc_cause is currently unused here — the trace entries
// below hard-code GCCause::_no_gc.
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
  GCTraceCPUTime tcpu;                 // scoped CPU-time trace for the pause
  TraceCollectorStats tcs(counters()); // scoped collector-stats update

  switch (op) {
    case CMS_op_checkpointRootsInitial: {
      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
      SvcGCMarker sgcm(SvcGCMarker::OTHER); // scoped serviceability GC marker
      checkpointRootsInitial();
      break;
    }
    case CMS_op_checkpointRootsFinal: {
      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
      SvcGCMarker sgcm(SvcGCMarker::OTHER); // scoped serviceability GC marker
      checkpointRootsFinal();
      break;
    }
    default:
      fatal("No such CMS_op");
  }
}
5572
5573 #ifndef PRODUCT
5574 size_t const CMSCollector::skip_header_HeapWords() {
5575 return FreeChunk::header_size();
5576 }
5577
5578 // Try and collect here conditions that should hold when
5579 // CMS thread is exiting. The idea is that the foreground GC
5580 // thread should not be blocked if it wants to terminate
5581 // the CMS thread and yet continue to run the VM for a while
5582 // after that.
5583 void CMSCollector::verify_ok_to_terminate() const {
5584 assert(Thread::current()->is_ConcurrentGC_thread(),
|
609
610 // Support for parallelizing survivor space rescan
611 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
612 const size_t max_plab_samples =
613 _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
614
615 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
616 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
617 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
618 _survivor_chunk_capacity = max_plab_samples;
619 for (uint i = 0; i < ParallelGCThreads; i++) {
620 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
621 ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
622 assert(cur->end() == 0, "Should be 0");
623 assert(cur->array() == vec, "Should be vec");
624 assert(cur->capacity() == max_plab_samples, "Error");
625 }
626 }
627
628 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
629 _gc_counters = new CollectorCounters("CMS stop-the-world full collections", 1);
630 _cgc_counters = new CollectorCounters("CMS stop-the-world phases", 2);
631 _completed_initialization = true;
632 _inter_sweep_timer.start(); // start of time
633 }
634
635 const char* ConcurrentMarkSweepGeneration::name() const {
636 return "concurrent mark-sweep generation";
637 }
638 void ConcurrentMarkSweepGeneration::update_counters() {
639 if (UsePerfData) {
640 _space_counters->update_all();
641 _gen_counters->update_all();
642 }
643 }
644
645 // this is an optimized version of update_counters(). it takes the
646 // used value as a parameter rather than computing it.
647 //
648 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
649 if (UsePerfData) {
650 _space_counters->update_used(used);
5534 size_policy()->reset_gc_overhead_limit_count();
5535 _collectorState = Idling;
5536 }
5537
5538 register_gc_end();
5539 }
5540
// Same as above but for STW paths
// Resets the collector at the end of a cycle: wipes the mark bitmap,
// moves the state machine from Resetting back to Idling, and records
// the end of the GC. The asserts below require that the collector is in
// the Resetting state and that the caller already holds the bitmap lock.
void CMSCollector::reset_stw() {
  // already have the lock
  assert(_collectorState == Resetting, "just checking");
  assert_lock_strong(bitMapLock());
  // NOTE(review): presumably attributes events in this scope to the CMS
  // thread's GC id rather than the current operation — confirm.
  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
  _markBitMap.clear_all();
  _collectorState = Idling;
  register_gc_end();
}
5551
5552 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5553 GCTraceCPUTime tcpu;
5554 TraceCollectorStats tcs(cgc_counters());
5555
5556 switch (op) {
5557 case CMS_op_checkpointRootsInitial: {
5558 GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5559 SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5560 checkpointRootsInitial();
5561 break;
5562 }
5563 case CMS_op_checkpointRootsFinal: {
5564 GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5565 SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5566 checkpointRootsFinal();
5567 break;
5568 }
5569 default:
5570 fatal("No such CMS_op");
5571 }
5572 }
5573
5574 #ifndef PRODUCT
5575 size_t const CMSCollector::skip_header_HeapWords() {
5576 return FreeChunk::header_size();
5577 }
5578
5579 // Try and collect here conditions that should hold when
5580 // CMS thread is exiting. The idea is that the foreground GC
5581 // thread should not be blocked if it wants to terminate
5582 // the CMS thread and yet continue to run the VM for a while
5583 // after that.
5584 void CMSCollector::verify_ok_to_terminate() const {
5585 assert(Thread::current()->is_ConcurrentGC_thread(),
|