602 assert(cmsThread()->collector() == this,
603 "CMS Thread should refer to this gen");
604 assert(CGC_lock != NULL, "Where's the CGC_lock?");
605
606 // Support for parallelizing young gen rescan
607 GenCollectedHeap* gch = GenCollectedHeap::heap();
608 assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
609 _young_gen = (ParNewGeneration*)gch->young_gen();
610 if (gch->supports_inline_contig_alloc()) {
611 _top_addr = gch->top_addr();
612 _end_addr = gch->end_addr();
613 assert(_young_gen != NULL, "no _young_gen");
614 _eden_chunk_index = 0;
615 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
616 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
617 }
618
619 // Support for parallelizing survivor space rescan
620 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
621 const size_t max_plab_samples =
622 ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
623
624 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
625 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
626 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
627 _survivor_chunk_capacity = 2*max_plab_samples;
628 for (uint i = 0; i < ParallelGCThreads; i++) {
629 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
630 ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
631 assert(cur->end() == 0, "Should be 0");
632 assert(cur->array() == vec, "Should be vec");
633 assert(cur->capacity() == max_plab_samples, "Error");
634 }
635 }
636
637 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
638 _gc_counters = new CollectorCounters("CMS", 1);
639 _completed_initialization = true;
640 _inter_sweep_timer.start(); // start of time
641 }
642
643 size_t CMSCollector::plab_sample_minimum_size() {
644 // The default value of MinTLABSize is 2k, but there is
645 // no way to get the default value if the flag has been overridden.
646 return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
647 }
648
649 const char* ConcurrentMarkSweepGeneration::name() const {
650 return "concurrent mark-sweep generation";
651 }
652 void ConcurrentMarkSweepGeneration::update_counters() {
653 if (UsePerfData) {
654 _space_counters->update_all();
655 _gen_counters->update_all();
656 }
657 }
658
// This is an optimized version of update_counters(); it takes the
// used value as a parameter rather than computing it.
//
662 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
663 if (UsePerfData) {
664 _space_counters->update_used(used);
665 _space_counters->update_capacity();
666 _gen_counters->update_all();
|
602 assert(cmsThread()->collector() == this,
603 "CMS Thread should refer to this gen");
604 assert(CGC_lock != NULL, "Where's the CGC_lock?");
605
606 // Support for parallelizing young gen rescan
607 GenCollectedHeap* gch = GenCollectedHeap::heap();
608 assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
609 _young_gen = (ParNewGeneration*)gch->young_gen();
610 if (gch->supports_inline_contig_alloc()) {
611 _top_addr = gch->top_addr();
612 _end_addr = gch->end_addr();
613 assert(_young_gen != NULL, "no _young_gen");
614 _eden_chunk_index = 0;
615 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
616 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
617 }
618
619 // Support for parallelizing survivor space rescan
620 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
621 const size_t max_plab_samples =
622 ((DefNewGeneration*)_young_gen)->max_survivor_size() / YoungPLABSize;
623
624 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
625 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
626 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
627 _survivor_chunk_capacity = 2*max_plab_samples;
628 for (uint i = 0; i < ParallelGCThreads; i++) {
629 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
630 ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
631 assert(cur->end() == 0, "Should be 0");
632 assert(cur->array() == vec, "Should be vec");
633 assert(cur->capacity() == max_plab_samples, "Error");
634 }
635 }
636
637 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
638 _gc_counters = new CollectorCounters("CMS", 1);
639 _completed_initialization = true;
640 _inter_sweep_timer.start(); // start of time
641 }
642
643 const char* ConcurrentMarkSweepGeneration::name() const {
644 return "concurrent mark-sweep generation";
645 }
646 void ConcurrentMarkSweepGeneration::update_counters() {
647 if (UsePerfData) {
648 _space_counters->update_all();
649 _gen_counters->update_all();
650 }
651 }
652
// This is an optimized version of update_counters(); it takes the
// used value as a parameter rather than computing it.
//
656 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
657 if (UsePerfData) {
658 _space_counters->update_used(used);
659 _space_counters->update_capacity();
660 _gen_counters->update_all();
|