src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

@@ -26,11 +26,10 @@
 #include "classfile/classLoaderData.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc/cms/cmsCollectorPolicy.hpp"
 #include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsOopClosures.inline.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"

@@ -41,11 +40,10 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/cardGeneration.inline.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
-#include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"

@@ -195,11 +193,11 @@
     promo.setSpace(cfls);
   }
 };
 
 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
-     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
+     ReservedSpace rs, size_t initial_byte_size, size_t min_byte_size, size_t max_byte_size, CardTableRS* ct) :
   CardGeneration(rs, initial_byte_size, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
   _did_compact(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();

@@ -246,10 +244,12 @@
   // promoting generation, we'll instead just use the minimum
   // object size (which today is a header's worth of space);
   // note that all arithmetic is in units of HeapWords.
   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
   assert(_dilatation_factor >= 1.0, "from previous assert");
+
+  initialize_performance_counters(min_byte_size, max_byte_size);
 }
 
 
 // The field "_initiating_occupancy" represents the occupancy percentage
 // at which we trigger a new collection cycle.  Unless explicitly specified

@@ -298,21 +298,20 @@
 
   }
 }
 
 AdaptiveSizePolicy* CMSCollector::size_policy() {
-  CMSHeap* heap = CMSHeap::heap();
-  return heap->gen_policy()->size_policy();
+  return CMSHeap::heap()->size_policy();
 }
 
-void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
+void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_byte_size, size_t max_byte_size) {
 
   const char* gen_name = "old";
-  GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
+
   // Generation Counters - generation 1, 1 subspace
   _gen_counters = new GenerationCounters(gen_name, 1, 1,
-      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
+      min_byte_size, max_byte_size, &_virtual_space);
 
   _space_counters = new GSpaceCounters(gen_name, 0,
                                        _virtual_space.reserved_size(),
                                        this, _gen_counters);
 }

@@ -437,12 +436,11 @@
                              CMSCollector::Idling;
 bool CMSCollector::_foregroundGCIsActive = false;
 bool CMSCollector::_foregroundGCShouldWait = false;
 
 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
-                           CardTableRS*                   ct,
-                           ConcurrentMarkSweepPolicy*     cp):
+                           CardTableRS*                   ct):
   _cmsGen(cmsGen),
   _ct(ct),
   _ref_processor(NULL),    // will be set later
   _conc_workers(NULL),     // may be set later
   _abort_preclean(false),

@@ -480,11 +478,10 @@
 #endif
   _collection_count_start(0),
   _verifying(false),
   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
   _completed_initialization(false),
-  _collector_policy(cp),
   _should_unload_classes(CMSClassUnloadingEnabled),
   _concurrent_cycles_since_last_unload(0),
   _roots_scanning_options(GenCollectedHeap::SO_None),
   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),

@@ -1180,12 +1177,10 @@
 
   // We start a collection if we believe an incremental collection may fail;
   // this is not likely to be productive in practice because it's probably too
   // late anyway.
   CMSHeap* heap = CMSHeap::heap();
-  assert(heap->collector_policy()->is_generation_policy(),
-         "You may want to check the correctness of the following");
   if (heap->incremental_collection_will_fail(true /* consult_young */)) {
     log.print("CMSCollector: collect because incremental collection will fail ");
     return true;
   }
 

@@ -1496,11 +1491,11 @@
                                          _young_gen->eden()->used(),
                                          _cmsGen->max_capacity(),
                                          max_eden_size,
                                          full,
                                          gc_cause,
-                                         heap->collector_policy());
+                                         heap->soft_ref_policy());
 
   // Reset the expansion cause, now that we just completed
   // a collection cycle.
   clear_expansion_cause();
   _foregroundGCIsActive = false;

@@ -1888,11 +1883,11 @@
                          p2i(Thread::current()), _collectorState);
     assert(_foregroundGCShouldWait, "block post-condition");
   }
 
   // Should this be in gc_epilogue?
-  collector_policy()->counters()->update_counters();
+  CMSHeap::heap()->counters()->update_counters();
 
   {
     // Clear _foregroundGCShouldWait and, in the event that the
     // foreground collector is waiting, notify it, before
     // returning.