
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 8053 : imported patch alloc


 191      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 192   CardGeneration(rs, initial_byte_size, level, ct),
 193   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 194   _did_compact(false)
 195 {
 196   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 197   HeapWord* end    = (HeapWord*) _virtual_space.high();
 198 
 199   _direct_allocated_words = 0;
 200   NOT_PRODUCT(
 201     _numObjectsPromoted = 0;
 202     _numWordsPromoted = 0;
 203     _numObjectsAllocated = 0;
 204     _numWordsAllocated = 0;
 205   )
 206 
 207   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 208                                            use_adaptive_freelists,
 209                                            dictionaryChoice);
 210   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 211   if (_cmsSpace == NULL) {
 212     vm_exit_during_initialization(
 213       "CompactibleFreeListSpace allocation failure");
 214   }
 215   _cmsSpace->_gen = this;
 216 
 217   _gc_stats = new CMSGCStats();
 218 
 219   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 220   // offsets match. The ability to tell free chunks from objects
 221   // depends on this property.
 222   debug_only(
 223     FreeChunk* junk = NULL;
 224     assert(UseCompressedClassPointers ||
 225            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 226            "Offset of FreeChunk::_prev within FreeChunk must match"
 227            " that of OopDesc::_klass within OopDesc");
 228   )
 229   if (CollectedHeap::use_parallel_gc_threads()) {
 230     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
 231     _par_gc_thread_states =
 232       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
 233     if (_par_gc_thread_states == NULL) {
 234       vm_exit_during_initialization("Could not allocate par gc structs");
 235     }
 236     for (uint i = 0; i < ParallelGCThreads; i++) {
 237       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 238       if (_par_gc_thread_states[i] == NULL) {
 239         vm_exit_during_initialization("Could not allocate par gc structs");
 240       }
 241     }
 242   } else {
 243     _par_gc_thread_states = NULL;
 244   }
 245   _incremental_collection_failed = false;
 246   // The "dilatation_factor" is the expansion that can occur
 247   // because the minimum object size in the CMS generation may
 248   // be larger than that in, say, a contiguous young
 249   // generation.
 250   // Ideally, in the calculation below, we'd compute the dilatation
 251   // factor as: MinChunkSize/(promoting_gen's min object size)
 252   // Since we do not have such a general query interface for the
 253   // promoting generation, we'll instead just use the minimum
 254   // object size (which today is a header's worth of space);
 255   // note that all arithmetic is in units of HeapWords.
 256   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 257   assert(_dilatation_factor >= 1.0, "from previous assert");
 258 }
 259 
 260 
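The _dilatation_factor initialized above is the ratio of the smallest CMS free chunk to the smallest object the promoting generation can produce; promotion can expand footprint by at most that ratio. A minimal sketch of the arithmetic with illustrative word counts (the real inputs are MinChunkSize and CollectedHeap::min_fill_size(), which depend on the platform):

    #include <cstdio>

    int main() {
      // Illustrative word counts only; the real values depend on the
      // platform's FreeChunk layout and minimum fill-object size.
      const double min_chunk_size = 4.0;   // stands in for MinChunkSize
      const double min_fill_size  = 2.0;   // stands in for min_fill_size()
      // Matches the initializer above: a 2-word young-gen object may
      // occupy up to 4 words once promoted into the CMS generation.
      const double dilatation_factor = min_chunk_size / min_fill_size;
      printf("dilatation factor = %.1f\n", dilatation_factor);  // 2.0
      return 0;
    }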


 569   }
 570   assert((_conc_workers != NULL) == (ConcGCThreads > 1),
 571          "Inconsistency");
 572 
 573   // Parallel task queues; these are shared for the
 574   // concurrent and stop-world phases of CMS, but
 575   // are not shared with parallel scavenge (ParNew).
 576   {
 577     uint i;
 578     uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 579 
 580     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
 581          || ParallelRefProcEnabled)
 582         && num_queues > 0) {
 583       _task_queues = new OopTaskQueueSet(num_queues);
 584       if (_task_queues == NULL) {
 585         warning("task_queues allocation failure.");
 586         return;
 587       }
 588       _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
 589       if (_hash_seed == NULL) {
 590         warning("_hash_seed array allocation failure");
 591         return;
 592       }
 593 
 594       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
 595       for (i = 0; i < num_queues; i++) {
 596         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
 597         if (q == NULL) {
 598           warning("work_queue allocation failure.");
 599           return;
 600         }
 601         _task_queues->register_queue(i, q);
 602       }
 603       for (i = 0; i < num_queues; i++) {
 604         _task_queues->queue(i)->initialize();
 605         _hash_seed[i] = 17;  // copied from ParNew
 606       }
 607     }
 608   }
 609 
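These per-worker queues follow HotSpot's work-stealing discipline: a worker pushes and pops at its own queue and, when that runs dry, steals from a pseudo-randomly chosen victim, which is what the per-queue _hash_seed feeds. A self-contained toy sketch of that pattern (a plain deque stands in for OopTaskQueue; none of these names are the HotSpot API):

    #include <cstdio>
    #include <deque>
    #include <vector>

    // Toy stand-in for one worker's task queue.
    typedef std::deque<int> TaskQueue;

    // Advance the seed, pick a starting victim from it, then probe the
    // other queues; this mirrors the role _hash_seed plays above.
    static bool steal(std::vector<TaskQueue>& queues, size_t self,
                      unsigned* seed, int* task) {
      *seed = *seed * 1103515245u + 12345u;            // LCG step
      size_t start = *seed % queues.size();
      for (size_t k = 0; k < queues.size(); k++) {
        size_t victim = (start + k) % queues.size();
        if (victim != self && !queues[victim].empty()) {
          *task = queues[victim].back();               // steal the cold end
          queues[victim].pop_back();
          return true;
        }
      }
      return false;                                    // nothing to steal
    }

    int main() {
      std::vector<TaskQueue> queues(4);
      unsigned seed = 17;                              // same seed constant
      queues[0].push_front(42);                        // worker 0 has a task
      int task;
      if (steal(queues, /*self=*/1, &seed, &task)) {   // idle worker 1 steals
        printf("worker 1 stole task %d\n", task);
      }
      return 0;
    }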
 610   _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 611 
 612   // CMSBootstrapOccupancy is a percentage; convert it to a fraction.
 613   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
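The flag is a whole-number percentage, so the division maps it onto [0.0, 1.0]. A quick check of the conversion, assuming the default value of 50:

    #include <cstdio>

    int main() {
      const unsigned bootstrap_occupancy_pct = 50;   // assumed flag value
      const double bootstrap_occupancy = (double)bootstrap_occupancy_pct / 100.0;
      printf("%.2f\n", bootstrap_occupancy);         // prints 0.50
      return 0;
    }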


 616   ConcurrentMarkSweepGeneration::set_collector(this);
 617 
 618   // Create & start a CMS thread for this CMS collector
 619   _cmsThread = ConcurrentMarkSweepThread::start(this);
 620   assert(cmsThread() != NULL, "CMS Thread should have been created");
 621   assert(cmsThread()->collector() == this,
 622          "CMS Thread should refer to this gen");
 623   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 624 
 625   // Support for parallelizing young gen rescan
 626   GenCollectedHeap* gch = GenCollectedHeap::heap();
 627   assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
 628   _young_gen = (ParNewGeneration*)gch->young_gen();
 629   if (gch->supports_inline_contig_alloc()) {
 630     _top_addr = gch->top_addr();
 631     _end_addr = gch->end_addr();
 632     assert(_young_gen != NULL, "no _young_gen");
 633     _eden_chunk_index = 0;
 634     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 635     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 636     if (_eden_chunk_array == NULL) {
 637       _eden_chunk_capacity = 0;
 638       warning("GC/CMS: _eden_chunk_array allocation failure");
 639     }
 640   }
 641   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 642 
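The sampling array gets one slot per CMSSamplingGrain of eden, and adding the grain before dividing rounds the quotient up so the array is never undersized. A sketch of that sizing arithmetic with illustrative values (the real inputs come from the running VM's young generation):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative values only: an eden capacity of 8M units and a
      // sampling grain of 16K, in whatever units the VM uses.
      const size_t max_capacity   = 8u * 1024 * 1024;
      const size_t sampling_grain = 16u * 1024;
      // Same formula as above: the +grain rounds up, leaving a spare
      // slot when capacity is an exact multiple of the grain.
      const size_t eden_chunk_capacity =
          (max_capacity + sampling_grain) / sampling_grain;
      printf("%zu sample slots\n", eden_chunk_capacity);   // prints 513
      return 0;
    }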
 643   // Support for parallelizing survivor space rescan
 644   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 645     const size_t max_plab_samples =
 646       ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
 647 
 648     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 649     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 650     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 651     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
 652         || _cursor == NULL) {
 653       warning("Failed to allocate survivor plab/chunk array");
 654       if (_survivor_plab_array  != NULL) {
 655         FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
 656         _survivor_plab_array = NULL;
 657       }
 658       if (_survivor_chunk_array != NULL) {
 659         FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
 660         _survivor_chunk_array = NULL;
 661       }
 662       if (_cursor != NULL) {
 663         FREE_C_HEAP_ARRAY(size_t, _cursor);
 664         _cursor = NULL;
 665       }
 666     } else {
 667       _survivor_chunk_capacity = 2*max_plab_samples;
 668       for (uint i = 0; i < ParallelGCThreads; i++) {
 669         HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 670         if (vec == NULL) {
 671           warning("Failed to allocate survivor plab array");
 672           for (uint j = i; j > 0; j--) {
 673             FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
 674           }
 675           FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
 676           FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
 677           _survivor_plab_array = NULL;
 678           _survivor_chunk_array = NULL;
 679           _survivor_chunk_capacity = 0;
 680           break;
 681         } else {
 682           ChunkArray* cur =
 683             ::new (&_survivor_plab_array[i]) ChunkArray(vec,
 684                                                         max_plab_samples);
 685           assert(cur->end() == 0, "Should be 0");
 686           assert(cur->array() == vec, "Should be vec");
 687           assert(cur->capacity() == max_plab_samples, "Error");
 688         }
 689       }
 690     }
 691   }
 692   assert(   (   _survivor_plab_array  != NULL
 693              && _survivor_chunk_array != NULL)
 694          || (   _survivor_chunk_capacity == 0
 695              && _survivor_chunk_index == 0),
 696          "Error");
 697 
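For scale: max_plab_samples is the survivor capacity divided by the minimum sample size, and the shared chunk array is sized at twice that bound. A sketch with assumed sizes (a 4 MB survivor space and the 2 KB floor from plab_sample_minimum_size() below):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Assumed sizes for illustration only.
      const size_t max_survivor_bytes = 4u * 1024 * 1024;  // 4 MB survivors
      const size_t plab_sample_min    = 2u * 1024;         // 2 KB minimum
      const size_t max_plab_samples   = max_survivor_bytes / plab_sample_min;
      // _survivor_chunk_array is sized at twice the per-space bound.
      printf("max_plab_samples = %zu, chunk array = %zu\n",
             max_plab_samples, 2 * max_plab_samples);      // 2048 and 4096
      return 0;
    }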
 698   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 699   _gc_counters = new CollectorCounters("CMS", 1);
 700   _completed_initialization = true;
 701   _inter_sweep_timer.start();  // start of time
 702 }
 703 
 704 size_t CMSCollector::plab_sample_minimum_size() {
 705   // The default value of MinTLABSize is 2k, but there is
 706   // no way to get the default value if the flag has been overridden.
 707   return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
 708 }
 709 
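The MAX2 puts a floor under the sample size, and hence a ceiling on the sample count, even when MinTLABSize is configured very small. A worked example assuming 8-byte HeapWords and a small TLAB minimum:

    #include <cstddef>
    #include <cstdio>

    template <class T> static T max2(T a, T b) { return a > b ? a : b; }

    int main() {
      const size_t heap_word_size = 8;      // assumed: 64-bit HeapWords
      const size_t min_tlab_words = 64;     // assumed small TLAB minimum
      // Mirrors the function above: the 2 KB floor wins here, since
      // 64 words * 8 bytes = 512 bytes < 2048 bytes.
      const size_t sample_min = max2(min_tlab_words * heap_word_size,
                                     (size_t)(2 * 1024));
      printf("%zu bytes\n", sample_min);    // prints 2048
      return 0;
    }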
 710 const char* ConcurrentMarkSweepGeneration::name() const {
 711   return "concurrent mark-sweep generation";
 712 }
 713 void ConcurrentMarkSweepGeneration::update_counters() {
 714   if (UsePerfData) {
 715     _space_counters->update_all();
 716     _gen_counters->update_all();

The same code follows as patched in rev 8053; the patch drops the NULL checks after allocations that abort the VM on failure rather than returning NULL.

 191      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 192   CardGeneration(rs, initial_byte_size, level, ct),
 193   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 194   _did_compact(false)
 195 {
 196   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 197   HeapWord* end    = (HeapWord*) _virtual_space.high();
 198 
 199   _direct_allocated_words = 0;
 200   NOT_PRODUCT(
 201     _numObjectsPromoted = 0;
 202     _numWordsPromoted = 0;
 203     _numObjectsAllocated = 0;
 204     _numWordsAllocated = 0;
 205   )
 206 
 207   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 208                                            use_adaptive_freelists,
 209                                            dictionaryChoice);
 210   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 211   _cmsSpace->_gen = this;
 212 
 213   _gc_stats = new CMSGCStats();
 214 
 215   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 216   // offsets match. The ability to tell free chunks from objects
 217   // depends on this property.
 218   debug_only(
 219     FreeChunk* junk = NULL;
 220     assert(UseCompressedClassPointers ||
 221            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 222            "Offset of FreeChunk::_prev within FreeChunk must match"
 223            " that of OopDesc::_klass within OopDesc");
 224   )
 225   if (CollectedHeap::use_parallel_gc_threads()) {
 226     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
 227     _par_gc_thread_states =
 228       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
 229     for (uint i = 0; i < ParallelGCThreads; i++) {
 230       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 231     }
 232   } else {
 233     _par_gc_thread_states = NULL;
 234   }
 235   _incremental_collection_failed = false;
 236   // The "dilatation_factor" is the expansion that can occur
 237   // because the minimum object size in the CMS generation may
 238   // be larger than that in, say, a contiguous young
 239   // generation.
 240   // Ideally, in the calculation below, we'd compute the dilatation
 241   // factor as: MinChunkSize/(promoting_gen's min object size)
 242   // Since we do not have such a general query interface for the
 243   // promoting generation, we'll instead just use the minimum
 244   // object size (which today is a header's worth of space);
 245   // note that all arithmetic is in units of HeapWords.
 246   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 247   assert(_dilatation_factor >= 1.0, "from previous assert");
 248 }
 249 
 250 
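In the patched constructor above, the NULL checks after NEW_C_HEAP_ARRAY and new are gone. That is sound if, as in the HotSpot allocation macros of this era, a failed C-heap allocation exits the VM rather than returning NULL, making the checks dead code. A toy sketch of that abort-on-failure contract (alloc_or_die is an illustrative stand-in, not a HotSpot API):

    #include <cstdio>
    #include <cstdlib>

    // Toy stand-in for an exit-on-OOM allocator: callers never see
    // NULL, so call sites need no failure checks.
    static void* alloc_or_die(size_t bytes, const char* what) {
      void* p = malloc(bytes);
      if (p == NULL) {
        fprintf(stderr, "out of memory allocating %s\n", what);
        exit(1);                   // analogous to vm_exit_out_of_memory
      }
      return p;
    }

    int main() {
      int* hash_seed = (int*)alloc_or_die(4 * sizeof(int), "_hash_seed");
      hash_seed[0] = 17;           // safe to use without a NULL check
      free(hash_seed);
      return 0;
    }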


 559   }
 560   assert((_conc_workers != NULL) == (ConcGCThreads > 1),
 561          "Inconsistency");
 562 
 563   // Parallel task queues; these are shared for the
 564   // concurrent and stop-world phases of CMS, but
 565   // are not shared with parallel scavenge (ParNew).
 566   {
 567     uint i;
 568     uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 569 
 570     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
 571          || ParallelRefProcEnabled)
 572         && num_queues > 0) {
 573       _task_queues = new OopTaskQueueSet(num_queues);
 574       if (_task_queues == NULL) {
 575         warning("task_queues allocation failure.");
 576         return;
 577       }
 578       _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
 579       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
 580       for (i = 0; i < num_queues; i++) {
 581         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
 582         if (q == NULL) {
 583           warning("work_queue allocation failure.");
 584           return;
 585         }
 586         _task_queues->register_queue(i, q);
 587       }
 588       for (i = 0; i < num_queues; i++) {
 589         _task_queues->queue(i)->initialize();
 590         _hash_seed[i] = 17;  // copied from ParNew
 591       }
 592     }
 593   }
 594 
 595   _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 596 
 597   // CMSBootstrapOccupancy is a percentage; convert it to a fraction.
 598   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;


 601   ConcurrentMarkSweepGeneration::set_collector(this);
 602 
 603   // Create & start a CMS thread for this CMS collector
 604   _cmsThread = ConcurrentMarkSweepThread::start(this);
 605   assert(cmsThread() != NULL, "CMS Thread should have been created");
 606   assert(cmsThread()->collector() == this,
 607          "CMS Thread should refer to this gen");
 608   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 609 
 610   // Support for parallelizing young gen rescan
 611   GenCollectedHeap* gch = GenCollectedHeap::heap();
 612   assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
 613   _young_gen = (ParNewGeneration*)gch->young_gen();
 614   if (gch->supports_inline_contig_alloc()) {
 615     _top_addr = gch->top_addr();
 616     _end_addr = gch->end_addr();
 617     assert(_young_gen != NULL, "no _young_gen");
 618     _eden_chunk_index = 0;
 619     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 620     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 621   }


 622 
 623   // Support for parallelizing survivor space rescan
 624   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 625     const size_t max_plab_samples =
 626       ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
 627 
 628     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 629     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 630     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 631     _survivor_chunk_capacity = 2*max_plab_samples;
 632     for (uint i = 0; i < ParallelGCThreads; i++) {
 633       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 634       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 635       assert(cur->end() == 0, "Should be 0");
 636       assert(cur->array() == vec, "Should be vec");
 637       assert(cur->capacity() == max_plab_samples, "Error");
 638     }
 639   }
 640 
 641   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 642   _gc_counters = new CollectorCounters("CMS", 1);
 643   _completed_initialization = true;
 644   _inter_sweep_timer.start();  // start of time
 645 }
 646 
 647 size_t CMSCollector::plab_sample_minimum_size() {
 648   // The default value of MinTLABSize is 2k, but there is
 649   // no way to get the default value if the flag has been overridden.
 650   return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
 651 }
 652 
 653 const char* ConcurrentMarkSweepGeneration::name() const {
 654   return "concurrent mark-sweep generation";
 655 }
 656 void ConcurrentMarkSweepGeneration::update_counters() {
 657   if (UsePerfData) {
 658     _space_counters->update_all();
 659     _gen_counters->update_all();

