src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

 608   // Support for parallelizing survivor space rescan
 609   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 610     const size_t max_plab_samples =
 611       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 612 
 613     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 614     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 615     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 616     _survivor_chunk_capacity = max_plab_samples;
 617     for (uint i = 0; i < ParallelGCThreads; i++) {
 618       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 619       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 620       assert(cur->end() == 0, "Should be 0");
 621       assert(cur->array() == vec, "Should be vec");
 622       assert(cur->capacity() == max_plab_samples, "Error");
 623     }
 624   }
 625 
 626   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 627   _gc_counters = new CollectorCounters("CMS", 1);

 628   _completed_initialization = true;
 629   _inter_sweep_timer.start();  // start of time
 630 }
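
The allocation loop above (lines 617–623) uses the HotSpot allocate-then-construct idiom: NEW_C_HEAP_ARRAY hands back raw, constructor-free storage, and a placement ::new then runs the ChunkArray constructor in each slot. A minimal standalone sketch of the same idiom, with std::malloc standing in for the allocation macro and a simplified ChunkArray that is illustrative, not the HotSpot class:

    #include <cstdlib>
    #include <cstddef>
    #include <new>

    // Simplified stand-in for the HotSpot ChunkArray: records a
    // sampling buffer and how much of it has been filled.
    class ChunkArray {
      void**      _array;     // backing buffer, allocated by the caller
      std::size_t _capacity;  // number of slots in _array
      std::size_t _end;       // slots used so far
     public:
      ChunkArray(void** a, std::size_t c) : _array(a), _capacity(c), _end(0) {}
      std::size_t end() const      { return _end; }
      std::size_t capacity() const { return _capacity; }
      void**      array() const    { return _array; }
    };

    int main() {
      const std::size_t nthreads = 4;            // stands in for ParallelGCThreads
      const std::size_t max_plab_samples = 128;  // stands in for the computed bound

      // Raw, constructor-free allocation, as NEW_C_HEAP_ARRAY does.
      ChunkArray* arr =
          static_cast<ChunkArray*>(std::malloc(nthreads * sizeof(ChunkArray)));

      for (std::size_t i = 0; i < nthreads; i++) {
        void** vec =
            static_cast<void**>(std::malloc(max_plab_samples * sizeof(void*)));
        // Placement new: run the constructor on storage we already own.
        ::new (&arr[i]) ChunkArray(vec, max_plab_samples);
      }
      // Teardown elided: the real code owns these arrays for the VM's lifetime.
      return 0;
    }
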
 631 
 632 const char* ConcurrentMarkSweepGeneration::name() const {
 633   return "concurrent mark-sweep generation";
 634 }
 635 void ConcurrentMarkSweepGeneration::update_counters() {
 636   if (UsePerfData) {
 637     _space_counters->update_all();
 638     _gen_counters->update_all();
 639   }
 640 }
 641 
 642 // This is an optimized version of update_counters(); it takes the
 643 // used value as a parameter rather than computing it.
 644 //
 645 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 646   if (UsePerfData) {
 647     _space_counters->update_used(used);
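
Per the comment at lines 642–644, the one-argument overload exists so hot paths that have just computed the generation's used size do not pay to recompute it. A hedged sketch of that overload pattern; Generation and SpaceCounters here are simplified stand-ins, not the HotSpot types:

    #include <cstddef>

    // Illustrative stand-in; the real HotSpot counters update shared
    // performance-data memory when UsePerfData is on.
    struct SpaceCounters {
      std::size_t used;
      void update_used(std::size_t u) { used = u; }
    };

    class Generation {
      SpaceCounters _space_counters;

      // Placeholder for a potentially expensive computation, e.g. a
      // walk of the generation's spaces.
      std::size_t compute_used() const { return 0; }

     public:
      // General form: computes 'used' itself.
      void update_counters() {
        _space_counters.update_used(compute_used());
      }

      // Optimized form: the caller already knows 'used' (for instance,
      // it was just computed during a collection), so skip the recompute.
      void update_counters(std::size_t used) {
        _space_counters.update_used(used);
      }
    };
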


5533     size_policy()->reset_gc_overhead_limit_count();
5534     _collectorState = Idling;
5535   }
5536 
5537   register_gc_end();
5538 }
5539 
5540 // Same as above but for STW paths
5541 void CMSCollector::reset_stw() {
5542   // already have the lock
5543   assert(_collectorState == Resetting, "just checking");
5544   assert_lock_strong(bitMapLock());
5545   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5546   _markBitMap.clear_all();
5547   _collectorState = Idling;
5548   register_gc_end();
5549 }
5550 
5551 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5552   GCTraceCPUTime tcpu;
5553   TraceCollectorStats tcs(counters());

5554 
5555   switch (op) {
5556     case CMS_op_checkpointRootsInitial: {
5557       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5558       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5559       checkpointRootsInitial();
5560       break;
5561     }
5562     case CMS_op_checkpointRootsFinal: {
5563       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5564       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5565       checkpointRootsFinal();
5566       break;
5567     }
5568     default:
5569       fatal("No such CMS_op");
5570   }
5571 }
5572 
5573 #ifndef PRODUCT
5574 size_t const CMSCollector::skip_header_HeapWords() {
5575   return FreeChunk::header_size();
5576 }
5577 
5578 // Try to collect here the conditions that should hold when the
5579 // CMS thread is exiting. The idea is that the foreground GC
5580 // thread should not be blocked if it wants to terminate
5581 // the CMS thread and yet continue to run the VM for a while
5582 // after that.
5583 void CMSCollector::verify_ok_to_terminate() const {
5584   assert(Thread::current()->is_ConcurrentGC_thread(),

+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

 608   // Support for parallelizing survivor space rescan
 609   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 610     const size_t max_plab_samples =
 611       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 612 
 613     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 614     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 615     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 616     _survivor_chunk_capacity = max_plab_samples;
 617     for (uint i = 0; i < ParallelGCThreads; i++) {
 618       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 619       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 620       assert(cur->end() == 0, "Should be 0");
 621       assert(cur->array() == vec, "Should be vec");
 622       assert(cur->capacity() == max_plab_samples, "Error");
 623     }
 624   }
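
The bound at lines 610–611 divides the largest possible survivor-space size in bytes by the smallest possible PLAB in bytes (PLAB::min_size() is in heap words, hence the HeapWordSize factor), giving the most PLAB samples any rescan could ever record. A worked example with assumed illustrative values; the real numbers come from the young generation and from PLAB::min_size() at runtime:

    #include <cstdio>
    #include <cstddef>

    int main() {
      // Illustrative values only, not the real runtime inputs.
      const std::size_t max_survivor_bytes = 16 * 1024 * 1024;  // assumed 16 MiB
      const std::size_t heap_word_size     = 8;                 // 64-bit HeapWord
      const std::size_t plab_min_words     = 256;               // assumed minimum PLAB

      const std::size_t max_plab_samples =
          max_survivor_bytes / (plab_min_words * heap_word_size);
      std::printf("max_plab_samples = %zu\n", max_plab_samples);  // prints 8192
      return 0;
    }
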
 625 
 626   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 627   _gc_counters = new CollectorCounters("CMS", 1);
 628   _cgc_counters = new CollectorCounters("CMS stop-the-world phases", 2);
 629   _completed_initialization = true;
 630   _inter_sweep_timer.start();  // start of time
 631 }
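
New line 628 is the core of this change: alongside the existing "CMS" counter set (collector ordinal 1) it registers a second CollectorCounters set for the stop-the-world phases under ordinal 2, so tools sampling the perf data can separate concurrent cycles from their pauses. A minimal stand-in showing the shape of such a registration (illustrative bodies, not the HotSpot implementation):

    // Illustrative stand-in: the real CollectorCounters publishes
    // sun.gc.collector.<ordinal>.* entries in shared perf-data memory.
    class CollectorCounters {
      const char* _name;     // human-readable collector name
      int         _ordinal;  // slot in the collector namespace
     public:
      CollectorCounters(const char* name, int ordinal)
          : _name(name), _ordinal(ordinal) {}
    };

    // Mirrors the constructor tail above: one set for the whole CMS
    // collector, one for just its stop-the-world phases.
    static CollectorCounters* _gc_counters  = new CollectorCounters("CMS", 1);
    static CollectorCounters* _cgc_counters =
        new CollectorCounters("CMS stop-the-world phases", 2);
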
 632 
 633 const char* ConcurrentMarkSweepGeneration::name() const {
 634   return "concurrent mark-sweep generation";
 635 }
 636 void ConcurrentMarkSweepGeneration::update_counters() {
 637   if (UsePerfData) {
 638     _space_counters->update_all();
 639     _gen_counters->update_all();
 640   }
 641 }
 642 
 643 // This is an optimized version of update_counters(); it takes the
 644 // used value as a parameter rather than computing it.
 645 //
 646 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 647   if (UsePerfData) {
 648     _space_counters->update_used(used);


5534     size_policy()->reset_gc_overhead_limit_count();
5535     _collectorState = Idling;
5536   }
5537 
5538   register_gc_end();
5539 }
5540 
5541 // Same as above but for STW paths
5542 void CMSCollector::reset_stw() {
5543   // already have the lock
5544   assert(_collectorState == Resetting, "just checking");
5545   assert_lock_strong(bitMapLock());
5546   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5547   _markBitMap.clear_all();
5548   _collectorState = Idling;
5549   register_gc_end();
5550 }
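
reset_stw runs with the bit-map lock already held and temporarily adopts the CMS thread's GC id so that log output is attributed to the right collection; GCIdMarkAndRestore is a scoped save/set/restore guard. A generic sketch of that guard idiom, with a hypothetical current-id slot rather than HotSpot's per-thread state:

    // Hypothetical current-GC-id slot; HotSpot keeps this per thread.
    static unsigned _current_gc_id = 0;

    // Scoped save/set/restore guard in the spirit of GCIdMarkAndRestore.
    class GCIdMarkAndRestore {
      unsigned _saved;
     public:
      explicit GCIdMarkAndRestore(unsigned id) : _saved(_current_gc_id) {
        _current_gc_id = id;  // adopt the given GC's id for this scope
      }
      ~GCIdMarkAndRestore() {
        _current_gc_id = _saved;  // restore whatever was current before
      }
    };
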
5551 
5552 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5553   GCTraceCPUTime tcpu;
5554   TraceCollectorStats tcs(EnableConcGCPerfCounter ? NULL : counters());
5555   TraceCollectorStats tcs_cgc(cgc_counters());
5556 
5557   switch (op) {
5558     case CMS_op_checkpointRootsInitial: {
5559       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5560       SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5561       checkpointRootsInitial();
5562       break;
5563     }
5564     case CMS_op_checkpointRootsFinal: {
5565       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5566       SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5567       checkpointRootsFinal();
5568       break;
5569     }
5570     default:
5571       fatal("No such CMS_op");
5572   }
5573 }
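
The updated prologue of do_CMS_operation (new lines 5554–5555) wires the two counter sets together: when the EnableConcGCPerfCounter flag introduced by this patch is on, the concurrent set is passed as NULL so the pause is not double-counted there, and the pause is recorded only in the stop-the-world set. A hedged sketch of that suppress-via-NULL RAII idiom; the types and flag below mirror the diff but their bodies are illustrative, not the HotSpot classes:

    #include <cstddef>

    struct CollectorCounters { /* perf-data handles elided */ };

    // RAII tracer in the spirit of TraceCollectorStats: updates its
    // counter set on scope entry/exit, and tolerates NULL so one set
    // can be switched off without restructuring the caller.
    class TraceCollectorStats {
      CollectorCounters* _c;
     public:
      explicit TraceCollectorStats(CollectorCounters* c) : _c(c) {
        if (_c != NULL) { /* record pause begin in *_c */ }
      }
      ~TraceCollectorStats() {
        if (_c != NULL) { /* record pause end in *_c */ }
      }
    };

    static bool EnableConcGCPerfCounter = true;  // assumed flag from the patch
    static CollectorCounters* counters()     { static CollectorCounters c; return &c; }
    static CollectorCounters* cgc_counters() { static CollectorCounters c; return &c; }

    void do_pause() {
      // As in the diff: suppress the concurrent-cycle set when the
      // split counters are enabled; always record the STW set.
      TraceCollectorStats tcs(EnableConcGCPerfCounter ? NULL : counters());
      TraceCollectorStats tcs_cgc(cgc_counters());
      // ... pause work ...
    }
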
5574 
5575 #ifndef PRODUCT
5576 size_t const CMSCollector::skip_header_HeapWords() {
5577   return FreeChunk::header_size();
5578 }
5579 
5580 // Try to collect here the conditions that should hold when the
5581 // CMS thread is exiting. The idea is that the foreground GC
5582 // thread should not be blocked if it wants to terminate
5583 // the CMS thread and yet continue to run the VM for a while
5584 // after that.
5585 void CMSCollector::verify_ok_to_terminate() const {
5586   assert(Thread::current()->is_ConcurrentGC_thread(),

