// src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
// (extracted from a web diff view; "Print this page" UI residue removed)




 609   // Support for parallelizing survivor space rescan
 610   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 611     const size_t max_plab_samples =
 612       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 613 
 614     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 615     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 616     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 617     _survivor_chunk_capacity = max_plab_samples;
 618     for (uint i = 0; i < ParallelGCThreads; i++) {
 619       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 620       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 621       assert(cur->end() == 0, "Should be 0");
 622       assert(cur->array() == vec, "Should be vec");
 623       assert(cur->capacity() == max_plab_samples, "Error");
 624     }
 625   }
 626 
 627   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 628   _gc_counters = new CollectorCounters("CMS", 1);
 629   _cgc_counters = new CollectorCounters("CMS stop-the-world phases", 2);
 630   _completed_initialization = true;
 631   _inter_sweep_timer.start();  // start of time
 632 }
 633 
 634 const char* ConcurrentMarkSweepGeneration::name() const {
 635   return "concurrent mark-sweep generation";
 636 }
 637 void ConcurrentMarkSweepGeneration::update_counters() {
 638   if (UsePerfData) {
 639     _space_counters->update_all();
 640     _gen_counters->update_all();
 641   }
 642 }
 643 
 644 // this is an optimized version of update_counters(). it takes the
 645 // used value as a parameter rather than computing it.
 646 //
 647 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 648   if (UsePerfData) {
 649     _space_counters->update_used(used);


5530     size_policy()->reset_gc_overhead_limit_count();
5531     _collectorState = Idling;
5532   }
5533 
5534   register_gc_end();
5535 }
5536 
5537 // Same as above but for STW paths
5538 void CMSCollector::reset_stw() {
5539   // already have the lock
5540   assert(_collectorState == Resetting, "just checking");
5541   assert_lock_strong(bitMapLock());
5542   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5543   _markBitMap.clear_all();
5544   _collectorState = Idling;
5545   register_gc_end();
5546 }
5547 
5548 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5549   GCTraceCPUTime tcpu;
5550   TraceCollectorStats tcs(cgc_counters());
5551 
5552   switch (op) {
5553     case CMS_op_checkpointRootsInitial: {
5554       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5555       SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5556       checkpointRootsInitial();
5557       break;
5558     }
5559     case CMS_op_checkpointRootsFinal: {
5560       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5561       SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5562       checkpointRootsFinal();
5563       break;
5564     }
5565     default:
5566       fatal("No such CMS_op");
5567   }
5568 }
5569 
5570 #ifndef PRODUCT
5571 size_t const CMSCollector::skip_header_HeapWords() {
5572   return FreeChunk::header_size();
5573 }
5574 
5575 // Try and collect here conditions that should hold when
5576 // CMS thread is exiting. The idea is that the foreground GC
5577 // thread should not be blocked if it wants to terminate
5578 // the CMS thread and yet continue to run the VM for a while
5579 // after that.
5580 void CMSCollector::verify_ok_to_terminate() const {
5581   assert(Thread::current()->is_ConcurrentGC_thread(),




 609   // Support for parallelizing survivor space rescan
 610   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 611     const size_t max_plab_samples =
 612       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 613 
 614     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 615     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 616     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 617     _survivor_chunk_capacity = max_plab_samples;
 618     for (uint i = 0; i < ParallelGCThreads; i++) {
 619       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 620       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 621       assert(cur->end() == 0, "Should be 0");
 622       assert(cur->array() == vec, "Should be vec");
 623       assert(cur->capacity() == max_plab_samples, "Error");
 624     }
 625   }
 626 
 627   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 628   _gc_counters = new CollectorCounters("CMS", 1);

 629   _completed_initialization = true;
 630   _inter_sweep_timer.start();  // start of time
 631 }
 632 
 633 const char* ConcurrentMarkSweepGeneration::name() const {
 634   return "concurrent mark-sweep generation";
 635 }
 636 void ConcurrentMarkSweepGeneration::update_counters() {
 637   if (UsePerfData) {
 638     _space_counters->update_all();
 639     _gen_counters->update_all();
 640   }
 641 }
 642 
 643 // this is an optimized version of update_counters(). it takes the
 644 // used value as a parameter rather than computing it.
 645 //
 646 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 647   if (UsePerfData) {
 648     _space_counters->update_used(used);


5529     size_policy()->reset_gc_overhead_limit_count();
5530     _collectorState = Idling;
5531   }
5532 
5533   register_gc_end();
5534 }
5535 
5536 // Same as above but for STW paths
5537 void CMSCollector::reset_stw() {
5538   // already have the lock
5539   assert(_collectorState == Resetting, "just checking");
5540   assert_lock_strong(bitMapLock());
5541   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5542   _markBitMap.clear_all();
5543   _collectorState = Idling;
5544   register_gc_end();
5545 }
5546 
5547 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5548   GCTraceCPUTime tcpu;
5549   TraceCollectorStats tcs(counters());
5550 
5551   switch (op) {
5552     case CMS_op_checkpointRootsInitial: {
5553       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5554       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5555       checkpointRootsInitial();
5556       break;
5557     }
5558     case CMS_op_checkpointRootsFinal: {
5559       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5560       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5561       checkpointRootsFinal();
5562       break;
5563     }
5564     default:
5565       fatal("No such CMS_op");
5566   }
5567 }
5568 
5569 #ifndef PRODUCT
5570 size_t const CMSCollector::skip_header_HeapWords() {
5571   return FreeChunk::header_size();
5572 }
5573 
5574 // Try and collect here conditions that should hold when
5575 // CMS thread is exiting. The idea is that the foreground GC
5576 // thread should not be blocked if it wants to terminate
5577 // the CMS thread and yet continue to run the VM for a while
5578 // after that.
5579 void CMSCollector::verify_ok_to_terminate() const {
5580   assert(Thread::current()->is_ConcurrentGC_thread(),