src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

Old version:
 610   // Support for parallelizing survivor space rescan
 611   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 612     const size_t max_plab_samples =
 613       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 614 
 615     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 616     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 617     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 618     _survivor_chunk_capacity = max_plab_samples;
 619     for (uint i = 0; i < ParallelGCThreads; i++) {
 620       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 621       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 622       assert(cur->end() == 0, "Should be 0");
 623       assert(cur->array() == vec, "Should be vec");
 624       assert(cur->capacity() == max_plab_samples, "Error");
 625     }
 626   }
 627 
 628   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 629   _gc_counters = new CollectorCounters("CMS", 1);

 630   _completed_initialization = true;
 631   _inter_sweep_timer.start();  // start of time
 632 }
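
A note on the construction idiom above: NEW_C_HEAP_ARRAY at line 615 hands back raw, constructor-free storage, so the loop at lines 619-625 uses placement new to run the ChunkArray constructor in each slot. A minimal standalone sketch of that pattern, with plain malloc standing in for the HotSpot C-heap allocator and an illustrative ChunkArray stand-in (names here are for illustration, not HotSpot's):

  #include <cstdlib>
  #include <cassert>
  #include <new>      // placement new

  // Stand-in for the ChunkArray elements built in the loop above.
  struct ChunkArray {
    void** _array;
    size_t _capacity;
    size_t _end;
    ChunkArray(void** a, size_t cap) : _array(a), _capacity(cap), _end(0) {}
  };

  int main() {
    const unsigned nthreads = 4;    // plays the role of ParallelGCThreads
    const size_t   samples  = 128;  // plays the role of max_plab_samples

    // Raw, constructor-free allocation, like NEW_C_HEAP_ARRAY.
    ChunkArray* arr = static_cast<ChunkArray*>(malloc(nthreads * sizeof(ChunkArray)));

    for (unsigned i = 0; i < nthreads; i++) {
      void** vec = static_cast<void**>(malloc(samples * sizeof(void*)));
      // Placement new: run the constructor in storage we already own.
      ChunkArray* cur = ::new (&arr[i]) ChunkArray(vec, samples);
      assert(cur->_end == 0 && cur->_capacity == samples);
    }

    // ChunkArray is trivially destructible, so freeing the storage suffices.
    for (unsigned i = 0; i < nthreads; i++) free(arr[i]._array);
    free(arr);
    return 0;
  }
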
 633 
 634 const char* ConcurrentMarkSweepGeneration::name() const {
 635   return "concurrent mark-sweep generation";
 636 }
 637 void ConcurrentMarkSweepGeneration::update_counters() {
 638   if (UsePerfData) {
 639     _space_counters->update_all();
 640     _gen_counters->update_all();
 641   }
 642 }
 643 
 644 // this is an optimized version of update_counters(). it takes the
 645 // used value as a parameter rather than computing it.
 646 //
 647 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 648   if (UsePerfData) {
 649     _space_counters->update_used(used);


5534     size_policy()->reset_gc_overhead_limit_count();
5535     _collectorState = Idling;
5536   }
5537 
5538   register_gc_end();
5539 }
5540 
5541 // Same as above but for STW paths
5542 void CMSCollector::reset_stw() {
5543   // already have the lock
5544   assert(_collectorState == Resetting, "just checking");
5545   assert_lock_strong(bitMapLock());
5546   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5547   _markBitMap.clear_all();
5548   _collectorState = Idling;
5549   register_gc_end();
5550 }
5551 
5552 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5553   GCTraceCPUTime tcpu;
5554   TraceCollectorStats tcs(counters());

5555 
5556   switch (op) {
5557     case CMS_op_checkpointRootsInitial: {
5558       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5559       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5560       checkpointRootsInitial();
5561       break;
5562     }
5563     case CMS_op_checkpointRootsFinal: {
5564       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5565       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5566       checkpointRootsFinal();
5567       break;
5568     }
5569     default:
5570       fatal("No such CMS_op");
5571   }
5572 }
5573 
5574 #ifndef PRODUCT
5575 size_t const CMSCollector::skip_header_HeapWords() {
5576   return FreeChunk::header_size();
5577 }
5578 
5579 // Try and collect here conditions that should hold when
5580 // CMS thread is exiting. The idea is that the foreground GC
5581 // thread should not be blocked if it wants to terminate
5582 // the CMS thread and yet continue to run the VM for a while
5583 // after that.
5584 void CMSCollector::verify_ok_to_terminate() const {
 5585   assert(Thread::current()->is_ConcurrentGC_thread(),

New version:
 610   // Support for parallelizing survivor space rescan
 611   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 612     const size_t max_plab_samples =
 613       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 614 
 615     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 616     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 617     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 618     _survivor_chunk_capacity = max_plab_samples;
 619     for (uint i = 0; i < ParallelGCThreads; i++) {
 620       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 621       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 622       assert(cur->end() == 0, "Should be 0");
 623       assert(cur->array() == vec, "Should be vec");
 624       assert(cur->capacity() == max_plab_samples, "Error");
 625     }
 626   }
 627 
 628   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 629   _gc_counters = new CollectorCounters("CMS", 1);
 630   _cgc_counters = new CollectorCounters("CMS stop-the-world phases", 2);
 631   _completed_initialization = true;
 632   _inter_sweep_timer.start();  // start of time
 633 }
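
The added line 630 registers a second CollectorCounters group at ordinal 2, so the collector's stop-the-world phases are accounted separately from the existing "CMS" group at ordinal 1. In HotSpot the ordinal becomes part of the perf-counter namespace (presumably sun.gc.collector.2.* here, inspectable with `jcmd <pid> PerfCounter.print`). A simplified model of the idea, not HotSpot's CollectorCounters:

  #include <cstdio>
  #include <string>

  // Simplified stand-in for HotSpot's CollectorCounters: a named group of
  // counters keyed by an ordinal that forms its namespace.
  class Counters {
    std::string _name;
    int         _ordinal;
    long        _invocations = 0;
  public:
    Counters(const char* name, int ordinal) : _name(name), _ordinal(ordinal) {}
    void count_one() { _invocations++; }
    void print() const {
      std::printf("sun.gc.collector.%d.name=\"%s\" invocations=%ld\n",
                  _ordinal, _name.c_str(), _invocations);
    }
  };

  int main() {
    Counters gc_counters("CMS", 1);                         // whole concurrent cycles
    Counters cgc_counters("CMS stop-the-world phases", 2);  // pauses only

    gc_counters.count_one();    // one full CMS cycle...
    cgc_counters.count_one();   // ...with its initial-mark pause
    cgc_counters.count_one();   // ...and its remark pause

    gc_counters.print();
    cgc_counters.print();
    return 0;
  }
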
 634 
 635 const char* ConcurrentMarkSweepGeneration::name() const {
 636   return "concurrent mark-sweep generation";
 637 }
 638 void ConcurrentMarkSweepGeneration::update_counters() {
 639   if (UsePerfData) {
 640     _space_counters->update_all();
 641     _gen_counters->update_all();
 642   }
 643 }
 644 
 645 // this is an optimized version of update_counters(). it takes the
 646 // used value as a parameter rather than computing it.
 647 //
 648 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 649   if (UsePerfData) {
 650     _space_counters->update_used(used);
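
The pair of update_counters overloads above is a small but deliberate API shape: the no-argument form computes the used size itself, while the size_t overload lets a caller that already has the value avoid recomputing it. A hedged sketch of the pattern, with placeholder Space and counter types rather than the CMS ones:

  #include <cstddef>
  #include <cstdio>

  struct Space {
    size_t _used = 0;
    size_t used() const { return _used; }  // pretend this walks the space
  };

  class GenerationCounters {
    Space* _space;
    size_t _last_used = 0;
  public:
    explicit GenerationCounters(Space* s) : _space(s) {}

    // General form: computes 'used' itself, then delegates.
    void update_counters() { update_counters(_space->used()); }

    // Optimized form: the caller already computed 'used', so take it
    // as a parameter rather than recomputing it.
    void update_counters(size_t used) {
      _last_used = used;
      std::printf("used=%zu\n", _last_used);
    }
  };

  int main() {
    Space s; s._used = 42;
    GenerationCounters gc(&s);
    gc.update_counters();       // computes used() itself
    gc.update_counters(1024);   // caller-supplied value, no recomputation
    return 0;
  }
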


5535     size_policy()->reset_gc_overhead_limit_count();
5536     _collectorState = Idling;
5537   }
5538 
5539   register_gc_end();
5540 }
5541 
5542 // Same as above but for STW paths
5543 void CMSCollector::reset_stw() {
5544   // already have the lock
5545   assert(_collectorState == Resetting, "just checking");
5546   assert_lock_strong(bitMapLock());
5547   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5548   _markBitMap.clear_all();
5549   _collectorState = Idling;
5550   register_gc_end();
5551 }
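
reset_stw() is entered with the bitmap lock already held, so assert_lock_strong at line 5546 checks ownership instead of re-acquiring. A sketch of that ownership-assertion idiom built on standard threads; this illustrates the idea only and is not HotSpot's Mutex:

  #include <cassert>
  #include <mutex>
  #include <thread>

  // A mutex that remembers its owner so callers can assert ownership.
  class OwnedMutex {
    std::mutex      _m;
    std::thread::id _owner;
  public:
    void lock()   { _m.lock(); _owner = std::this_thread::get_id(); }
    void unlock() { _owner = std::thread::id(); _m.unlock(); }
    bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
  };

  #define assert_lock_strong(l) assert((l).owned_by_self() && "lock must be held here")

  OwnedMutex bitmap_lock;

  void reset_stw_like() {
    // Caller already holds the lock; just check, don't re-acquire.
    assert_lock_strong(bitmap_lock);
    // ... clear the mark bitmap, flip state to Idling ...
  }

  int main() {
    bitmap_lock.lock();
    reset_stw_like();
    bitmap_lock.unlock();
    return 0;
  }
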
5552 
5553 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5554   GCTraceCPUTime tcpu;
5555   TraceCollectorStats tcs(EnableConcGCPerfCounter ? NULL : counters());
5556   TraceCollectorStats tcs_cgc(cgc_counters());
5557 
5558   switch (op) {
5559     case CMS_op_checkpointRootsInitial: {
5560       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5561       SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5562       checkpointRootsInitial();
5563       break;
5564     }
5565     case CMS_op_checkpointRootsFinal: {
5566       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5567       SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5568       checkpointRootsFinal();
5569       break;
5570     }
5571     default:
5572       fatal("No such CMS_op");
5573   }
5574 }
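
The heart of this hunk is line 5555: when EnableConcGCPerfCounter is set, the pause is already being charged to the new stop-the-world group by tcs_cgc, so the old "CMS" group is handed NULL and (as the diff evidently relies on) the tracer degrades to a no-op, avoiding double counting. A sketch of an RAII tracer with that NULL-tolerant behavior, using simplified types rather than HotSpot's:

  #include <cstdio>

  struct CounterGroup {
    const char* name;
    long invocations = 0;
  };

  // RAII tracer: bumps the group's counters for the enclosing scope.
  // A null group makes the tracer a no-op, which is what the
  // 'EnableConcGCPerfCounter ? NULL : counters()' ternary relies on.
  class TraceCollectorStats {
    CounterGroup* _c;
  public:
    explicit TraceCollectorStats(CounterGroup* c) : _c(c) {}
    ~TraceCollectorStats() { if (_c != nullptr) _c->invocations++; }
  };

  CounterGroup cms_counters{"CMS"};
  CounterGroup cgc_counters{"CMS stop-the-world phases"};
  bool EnableConcGCPerfCounter = true;

  void do_pause() {
    TraceCollectorStats tcs(EnableConcGCPerfCounter ? nullptr : &cms_counters);
    TraceCollectorStats tcs_cgc(&cgc_counters);
    // ... the pause work (initial mark or remark) happens here ...
  }

  int main() {
    do_pause();
    std::printf("%s: %ld\n", cms_counters.name, cms_counters.invocations);  // 0
    std::printf("%s: %ld\n", cgc_counters.name, cgc_counters.invocations);  // 1
    return 0;
  }
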
5575 
5576 #ifndef PRODUCT
5577 size_t const CMSCollector::skip_header_HeapWords() {
5578   return FreeChunk::header_size();
5579 }
5580 
5581 // Try and collect here conditions that should hold when
5582 // CMS thread is exiting. The idea is that the foreground GC
5583 // thread should not be blocked if it wants to terminate
5584 // the CMS thread and yet continue to run the VM for a while
5585 // after that.
5586 void CMSCollector::verify_ok_to_terminate() const {
5587   assert(Thread::current()->is_ConcurrentGC_thread(),

