< prev index next >

src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

Print this page


   1 /*
   2  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 612   // Support for parallelizing survivor space rescan
 613   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 614     const size_t max_plab_samples =
 615       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 616 
 617     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 618     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 619     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 620     _survivor_chunk_capacity = max_plab_samples;
 621     for (uint i = 0; i < ParallelGCThreads; i++) {
 622       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 623       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 624       assert(cur->end() == 0, "Should be 0");
 625       assert(cur->array() == vec, "Should be vec");
 626       assert(cur->capacity() == max_plab_samples, "Error");
 627     }
 628   }
 629 
 630   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 631   _gc_counters = new CollectorCounters("CMS", 1);

 632   _completed_initialization = true;
 633   _inter_sweep_timer.start();  // start of time
 634 }
 635 
 636 const char* ConcurrentMarkSweepGeneration::name() const {
 637   return "concurrent mark-sweep generation";
 638 }
 639 void ConcurrentMarkSweepGeneration::update_counters() {
 640   if (UsePerfData) {
 641     _space_counters->update_all();
 642     _gen_counters->update_all();
 643   }
 644 }
 645 
 646 // this is an optimized version of update_counters(). it takes the
 647 // used value as a parameter rather than computing it.
 648 //
 649 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 650   if (UsePerfData) {
 651     _space_counters->update_used(used);


5543     _collectorState = Idling;
5544   }
5545 
5546   register_gc_end();
5547 }
5548 
// Same as above but for STW paths
// Stop-the-world variant of the Resetting phase: clear the mark bitmap
// and return the collector to Idling. Caller must already hold the
// bitmap lock (asserted below) and have set _collectorState to Resetting.
void CMSCollector::reset_stw() {
  // already have the lock
  assert(_collectorState == Resetting, "just checking");
  assert_lock_strong(bitMapLock());
  // Re-associate log/trace output with the CMS thread's GC id for the
  // duration of this reset.
  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
  // Clearing the mark bitmap is the whole of the reset work on this path.
  _markBitMap.clear_all();
  _collectorState = Idling;
  register_gc_end();
}
5559 
// Dispatch one of the CMS stop-the-world pause operations.
// NOTE(review): gc_cause is currently unused here — the GCTraceTime
// entries below pass GCCause::_no_gc explicitly; confirm this is intended.
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
  GCTraceCPUTime tcpu;                  // scoped CPU-time tracing for the pause
  TraceCollectorStats tcs(counters());  // scoped update of collector perf counters

  switch (op) {
    case CMS_op_checkpointRootsInitial: {
      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
      SvcGCMarker sgcm(SvcGCMarker::OTHER);
      checkpointRootsInitial();
      break;
    }
    case CMS_op_checkpointRootsFinal: {
      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
      SvcGCMarker sgcm(SvcGCMarker::OTHER);
      checkpointRootsFinal();
      break;
    }
    default:
      fatal("No such CMS_op");
  }
}
5581 
5582 #ifndef PRODUCT
5583 size_t const CMSCollector::skip_header_HeapWords() {
5584   return FreeChunk::header_size();
5585 }
5586 
5587 // Try and collect here conditions that should hold when
5588 // CMS thread is exiting. The idea is that the foreground GC
5589 // thread should not be blocked if it wants to terminate
5590 // the CMS thread and yet continue to run the VM for a while
5591 // after that.
5592 void CMSCollector::verify_ok_to_terminate() const {
5593   assert(Thread::current()->is_ConcurrentGC_thread(),


   1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 612   // Support for parallelizing survivor space rescan
 613   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 614     const size_t max_plab_samples =
 615       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 616 
 617     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 618     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 619     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 620     _survivor_chunk_capacity = max_plab_samples;
 621     for (uint i = 0; i < ParallelGCThreads; i++) {
 622       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 623       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 624       assert(cur->end() == 0, "Should be 0");
 625       assert(cur->array() == vec, "Should be vec");
 626       assert(cur->capacity() == max_plab_samples, "Error");
 627     }
 628   }
 629 
 630   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 631   _gc_counters = new CollectorCounters("CMS", 1);
 632   _cgc_counters = new CollectorCounters("CMS stop-the-world phases", 2);
 633   _completed_initialization = true;
 634   _inter_sweep_timer.start();  // start of time
 635 }
 636 
 637 const char* ConcurrentMarkSweepGeneration::name() const {
 638   return "concurrent mark-sweep generation";
 639 }
 640 void ConcurrentMarkSweepGeneration::update_counters() {
 641   if (UsePerfData) {
 642     _space_counters->update_all();
 643     _gen_counters->update_all();
 644   }
 645 }
 646 
 647 // this is an optimized version of update_counters(). it takes the
 648 // used value as a parameter rather than computing it.
 649 //
 650 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 651   if (UsePerfData) {
 652     _space_counters->update_used(used);


5544     _collectorState = Idling;
5545   }
5546 
5547   register_gc_end();
5548 }
5549 
// Same as above but for STW paths
// Stop-the-world variant of the Resetting phase: clear the mark bitmap
// and return the collector to Idling. Caller must already hold the
// bitmap lock (asserted below) and have set _collectorState to Resetting.
void CMSCollector::reset_stw() {
  // already have the lock
  assert(_collectorState == Resetting, "just checking");
  assert_lock_strong(bitMapLock());
  // Re-associate log/trace output with the CMS thread's GC id for the
  // duration of this reset.
  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
  // Clearing the mark bitmap is the whole of the reset work on this path.
  _markBitMap.clear_all();
  _collectorState = Idling;
  register_gc_end();
}
5560 
// Dispatch one of the CMS stop-the-world pause operations.
// NOTE(review): gc_cause is currently unused here — the GCTraceTime
// entries below pass GCCause::_no_gc explicitly; confirm this is intended.
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
  GCTraceCPUTime tcpu;                       // scoped CPU-time tracing for the pause
  TraceCollectorStats tcs(counters());       // overall CMS collector perf counters
  TraceCollectorStats tcs_cgc(cgc_counters());  // STW-phase ("CMS stop-the-world phases") counters

  switch (op) {
    case CMS_op_checkpointRootsInitial: {
      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
      SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
      checkpointRootsInitial();
      break;
    }
    case CMS_op_checkpointRootsFinal: {
      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
      SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
      checkpointRootsFinal();
      break;
    }
    default:
      fatal("No such CMS_op");
  }
}
5583 
5584 #ifndef PRODUCT
5585 size_t const CMSCollector::skip_header_HeapWords() {
5586   return FreeChunk::header_size();
5587 }
5588 
5589 // Try and collect here conditions that should hold when
5590 // CMS thread is exiting. The idea is that the foreground GC
5591 // thread should not be blocked if it wants to terminate
5592 // the CMS thread and yet continue to run the VM for a while
5593 // after that.
5594 void CMSCollector::verify_ok_to_terminate() const {
5595   assert(Thread::current()->is_ConcurrentGC_thread(),


< prev index next >