< prev index next >

src/hotspot/share/gc/g1/g1MonitoringSupport.cpp

Print this page

        

@@ -82,42 +82,59 @@
               G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
     _current_size->set_value(committed);
   }
 };
 
+// Committed bytes of the old generation as a whole. In legacy monitoring
+// mode the humongous and archive spaces are already folded into
+// _old_space_committed (see recalculate_sizes()), so they must not be
+// added a second time.
+size_t G1MonitoringSupport::old_gen_committed() {
+  return _old_space_committed +
+    (use_legacy_monitoring() ? 0 : _humongous_space_committed + _archive_space_committed);
+}
+
+// Used bytes of the old generation as a whole: the old space plus, in
+// default (non-legacy) mode, the separately tracked humongous and
+// archive spaces.
+size_t G1MonitoringSupport::old_gen_used() {
+  return old_space_used() +
+    (use_legacy_monitoring() ? 0 : humongous_space_used() + archive_space_used());
+}
+
 G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
   _g1h(g1h),
+  _use_legacy_monitoring(G1UseLegacyMonitoring),
+
+  _full_memory_manager(G1UseLegacyMonitoring ? "G1 Old Generation" : "G1 Full", "end of major GC"),
   _incremental_memory_manager("G1 Young Generation", "end of minor GC"),
-  _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
+  _young_memory_manager("G1 Young", "end of young GC"),
+  _mixed_memory_manager("G1 Mixed", "end of mixed GC"),
+  _conc_memory_manager("G1 Concurrent Cycle", "end of concurrent cycle"),
+
   _eden_space_pool(NULL),
   _survivor_space_pool(NULL),
-  _old_gen_pool(NULL),
+  _old_space_pool(NULL),
+  _archive_space_pool(NULL),
+  _humongous_space_pool(NULL),
+
   _incremental_collection_counters(NULL),
   _full_collection_counters(NULL),
   _conc_collection_counters(NULL),
-  _young_gen_counters(NULL),
-  _old_gen_counters(NULL),
-  _old_space_counters(NULL),
-  _eden_space_counters(NULL),
-  _from_space_counters(NULL),
-  _to_space_counters(NULL),
 
-  _overall_committed(0),
-  _overall_used(0),
+  _young_gen_counters(NULL),     _old_gen_counters(NULL),
+
+  _old_space_counters(NULL),     _eden_space_counters(NULL),
+  _from_space_counters(NULL),    _to_space_counters(NULL),
+
+  _overall_committed(0),         _overall_used(0),
   _young_gen_committed(0),
-  _old_gen_committed(0),
+  _eden_space_committed(0),      _eden_space_used(0),
+  _survivor_space_committed(0),  _survivor_space_used(0),
+  _old_space_committed(0),       _old_space_used(0),
+  _archive_space_committed(0),   _archive_space_used(0),
+  _humongous_space_committed(0), _humongous_space_used(0) {
 
-  _eden_space_committed(0),
-  _eden_space_used(0),
-  _survivor_space_committed(0),
-  _survivor_space_used(0),
-  _old_gen_used(0) {
+  // Counters for garbage collections.
 
+  // Compute initial capacities. Somewhat random, as they depend
+  // on what's happened so far during JVM initialization.
   recalculate_sizes();
 
-  // Counters for garbage collections
-  //
   //  name "collector.0".  In a generational collector this would be the
   // young generation collection.
   _incremental_collection_counters =
     new CollectorCounters("G1 incremental collections", 0);
   //   name "collector.1".  In a generational collector this would be the

@@ -141,11 +158,11 @@
   // Counters are created from maxCapacity, capacity, initCapacity,
   // and used.
   _old_space_counters = new HSpaceCounters(_old_gen_counters->name_space(),
     "space", 0 /* ordinal */,
     pad_capacity(g1h->max_capacity()) /* max_capacity */,
-    pad_capacity(_old_gen_committed) /* init_capacity */);
+    pad_capacity(old_gen_committed()) /* init_capacity */);
 
   //   Young collection set
   //  name "generation.0".  This is logically the young generation.
   //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
   // See  _old_collection_counters for additional counters

@@ -183,162 +200,268 @@
 }
 
+// Tear down the memory pools created in initialize_serviceability().
G1MonitoringSupport::~G1MonitoringSupport() {
  delete _eden_space_pool;
  delete _survivor_space_pool;
-  delete _old_gen_pool;
+  delete _old_space_pool;
+  delete _archive_space_pool;
+  delete _humongous_space_pool;
}
 
+// Create the MemoryPool objects and register them with the memory
+// managers that expose them through the MemoryMXBean interfaces.
void G1MonitoringSupport::initialize_serviceability() {
  _eden_space_pool = new G1EdenPool(_g1h, _eden_space_committed);
  _survivor_space_pool = new G1SurvivorPool(_g1h, _survivor_space_committed);
-  _old_gen_pool = new G1OldGenPool(_g1h, _old_gen_committed, _g1h->max_capacity());
+  _old_space_pool = new G1OldPool(_g1h, _old_space_committed, _g1h->max_capacity());
+  _archive_space_pool = new G1ArchivePool(_g1h, _archive_space_committed);
+  _humongous_space_pool = new G1HumongousPool(_g1h, _humongous_space_committed);

-  _full_gc_memory_manager.add_pool(_eden_space_pool);
-  _full_gc_memory_manager.add_pool(_survivor_space_pool);
-  _full_gc_memory_manager.add_pool(_old_gen_pool);
+  // Pools must be added to each memory manager in the order specified
+  // below: TestMemoryMXBeansAndPoolsPresence.java expects them so.

+  if (use_legacy_monitoring()) {
  _incremental_memory_manager.add_pool(_eden_space_pool);
  _incremental_memory_manager.add_pool(_survivor_space_pool);
+    // NOTE(review): the two context lines above keep their old 2-space
+    // indent although they now sit inside the new if-block; re-indent them.
-  _incremental_memory_manager.add_pool(_old_gen_pool, false /* always_affected_by_gc */);
-}
+    // Incremental GCs can affect the humongous pool, but legacy behavior ignores it.
+    //  _incremental_memory_manager.add_pool(_humongous_space_pool);
+    _incremental_memory_manager.add_pool(_old_space_pool, false /* always_affected_by_gc */);
+  } else {
+    _young_memory_manager.add_pool(_eden_space_pool);
+    _young_memory_manager.add_pool(_survivor_space_pool);
+    _young_memory_manager.add_pool(_humongous_space_pool);
+
+    _mixed_memory_manager.add_pool(_eden_space_pool);
+    _mixed_memory_manager.add_pool(_survivor_space_pool);
+    _mixed_memory_manager.add_pool(_humongous_space_pool);
+    _mixed_memory_manager.add_pool(_old_space_pool);

-MemoryUsage G1MonitoringSupport::memory_usage() {
-  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
-  return MemoryUsage(InitialHeapSize, _overall_used, _overall_committed, _g1h->max_capacity());
+    _conc_memory_manager.add_pool(_humongous_space_pool);
+    _conc_memory_manager.add_pool(_old_space_pool);
+  }
+
+  // The full-collection manager covers every pool, in both monitoring modes.
+  _full_memory_manager.add_pool(_eden_space_pool);
+  _full_memory_manager.add_pool(_survivor_space_pool);
+  if (!use_legacy_monitoring()) {
+    _full_memory_manager.add_pool(_humongous_space_pool);
+    _full_memory_manager.add_pool(_archive_space_pool);
+  }
+  _full_memory_manager.add_pool(_old_space_pool);
+
+  // Update pool and jstat counter content
+  update_sizes();
}
 
+// Managers reported to the MemoryService: legacy mode exposes the classic
+// incremental manager; default mode exposes the young, mixed and
+// concurrent-cycle managers. The full manager is appended in both modes.
GrowableArray<GCMemoryManager*> G1MonitoringSupport::memory_managers() {
-  GrowableArray<GCMemoryManager*> memory_managers(2);
+  GrowableArray<GCMemoryManager*> memory_managers(4);
+  if (use_legacy_monitoring()) {
  memory_managers.append(&_incremental_memory_manager);
+  // NOTE(review): the context line above keeps its old 2-space indent
+  // although it now sits inside the new if-block; re-indent it.
-  memory_managers.append(&_full_gc_memory_manager);
+  } else {
+    memory_managers.append(&_young_memory_manager);
+    memory_managers.append(&_mixed_memory_manager);
+    memory_managers.append(&_conc_memory_manager);
+  }
+  memory_managers.append(&_full_memory_manager);
  return memory_managers;
}
 
+// Pools reported to the MemoryService. The humongous and archive pools
+// are only exposed in default (non-legacy) mode; in legacy mode their
+// content is accounted to the old space.
GrowableArray<MemoryPool*> G1MonitoringSupport::memory_pools() {
-  GrowableArray<MemoryPool*> memory_pools(3);
+  GrowableArray<MemoryPool*> memory_pools(5);
  memory_pools.append(_eden_space_pool);
  memory_pools.append(_survivor_space_pool);
-  memory_pools.append(_old_gen_pool);
+  memory_pools.append(_old_space_pool);
+  if (!use_legacy_monitoring()) {
+    memory_pools.append(_humongous_space_pool);
+    memory_pools.append(_archive_space_pool);
+  }
  return memory_pools;
}
 
+// Recompute the cached used/committed sizes of every space from the
+// current heap state. The caller must hold the Heap_lock or be at a
+// safepoint; results are published under MonitoringSupport_lock.
void G1MonitoringSupport::recalculate_sizes() {
  assert_heap_locked_or_at_safepoint(true);

  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
-  // Recalculate all the sizes from scratch.

-  uint young_list_length = _g1h->young_regions_count();
-  uint survivor_list_length = _g1h->survivor_regions_count();
-  assert(young_list_length >= survivor_list_length, "invariant");
-  uint eden_list_length = young_list_length - survivor_list_length;
+  // Recalculate all sizes from scratch.
+
+  uint eden_regions_count = _g1h->eden_regions_count();
+  uint survivor_regions_count = _g1h->survivor_regions_count();
+  uint young_regions_count = _g1h->young_regions_count();
+  assert(young_regions_count == eden_regions_count + survivor_regions_count, "invariant");
+  uint old_regions_count = _g1h->old_regions_count();
+  // NOTE(review): old_regions_count is assigned but never read below —
+  // remove it or use it (e.g. in an assert).
+  uint archive_regions_count = _g1h->archive_regions_count();
+  uint humongous_regions_count = _g1h->humongous_regions_count();
+
  // Max length includes any potential extensions to the young gen
  // we'll do when the GC locker is active.
-  uint young_list_max_length = _g1h->g1_policy()->young_list_max_length();
-  assert(young_list_max_length >= survivor_list_length, "invariant");
-  uint eden_list_max_length = young_list_max_length - survivor_list_length;
+  uint young_regions_count_max = _g1h->g1_policy()->young_list_max_length();
+  assert(young_regions_count_max >= survivor_regions_count, "invariant");
+  uint eden_regions_count_max = young_regions_count_max - survivor_regions_count;

  _overall_used = _g1h->used_unlocked();
-  _eden_space_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
-  _survivor_space_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
-  _old_gen_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used);
+  _eden_space_used = (size_t)eden_regions_count * HeapRegion::GrainBytes;
+  _survivor_space_used = (size_t)survivor_regions_count * HeapRegion::GrainBytes;
+  _archive_space_used = (size_t)archive_regions_count * HeapRegion::GrainBytes;
+  _humongous_space_used = (size_t)humongous_regions_count * HeapRegion::GrainBytes;
+ 
+  // NOTE(review): the blank line above carries trailing whitespace —
+  // remove it before pushing.
+  // We separately keep track of the humongous and archive spaces, no
+  // matter which mode we're in. In legacy mode, the old space is the
+  // sum of the old, humongous and archive spaces, but in default mode
+  // it does not include the humongous and archive spaces. The old
+  // generation as a whole (in contrast to the old space), always
+  // includes the humongous and archive spaces. See the definitions of
+  // old_gen_committed() and old_gen_used().
+  size_t excess_old = use_legacy_monitoring() ? 0 : _humongous_space_used + _archive_space_used;
+  _old_space_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used + excess_old);

-  // First calculate the committed sizes that can be calculated independently.
+  // First, calculate the committed sizes that can be calculated independently.
  _survivor_space_committed = _survivor_space_used;
-  _old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);
+  _old_space_committed = HeapRegion::align_up_to_region_byte_size(_old_space_used);
+  _archive_space_committed = _archive_space_used;
+  _humongous_space_committed = _humongous_space_used;

  // Next, start with the overall committed size.
-  _overall_committed = _g1h->capacity();
-  size_t committed = _overall_committed;
+  size_t committed = _overall_committed = _g1h->capacity();

  // Remove the committed size we have calculated so far (for the
-  // survivor and old space).
-  assert(committed >= (_survivor_space_committed + _old_gen_committed), "sanity");
-  committed -= _survivor_space_committed + _old_gen_committed;
+  // survivor, old, archive, and humongous spaces).
+  assert(committed >= (_survivor_space_committed + _old_space_committed + excess_old), "sanity");
+  committed -= _survivor_space_committed + _old_space_committed + excess_old;

  // Next, calculate and remove the committed size for the eden.
-  _eden_space_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
+  _eden_space_committed = (size_t)eden_regions_count_max * HeapRegion::GrainBytes;
  // Somewhat defensive: be robust in case there are inaccuracies in
  // the calculations
  _eden_space_committed = MIN2(_eden_space_committed, committed);
  committed -= _eden_space_committed;

  // Finally, give the rest to the old space...
-  _old_gen_committed += committed;
+  _old_space_committed += committed;
  // ..and calculate the young gen committed.
  _young_gen_committed = _eden_space_committed + _survivor_space_committed;

  assert(_overall_committed ==
-         (_eden_space_committed + _survivor_space_committed + _old_gen_committed),
+         (_eden_space_committed + _survivor_space_committed + _old_space_committed + excess_old),
         "the committed sizes should add up");
  // Somewhat defensive: cap the eden used size to make sure it
  // never exceeds the committed size.
  _eden_space_used = MIN2(_eden_space_used, _eden_space_committed);
-  // _survivor_committed and _old_committed are calculated in terms of
-  // the corresponding _*_used value, so the next two conditions
-  // should hold.
+
+  // _survivor_space_committed and _old_space_committed are calculated in terms of
+  // the corresponding _*_used value, so the next two conditions should hold.
  assert(_survivor_space_used <= _survivor_space_committed, "post-condition");
-  assert(_old_gen_used <= _old_gen_committed, "post-condition");
+  assert(_old_space_used <= _old_space_committed, "post-condition");
}
 
+// Refresh the cached sizes and push them into the jstat performance
+// counters. The old-space counter reports the whole old generation —
+// old_gen_committed()/old_gen_used() include the humongous and archive
+// spaces in default mode.
void G1MonitoringSupport::update_sizes() {
  recalculate_sizes();
  if (UsePerfData) {
    _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed));
-    _eden_space_counters->update_used(_eden_space_used);
+    _eden_space_counters->update_used(eden_space_used());
   // only the "to" survivor space is active, so we don't need to
    // update the counters for the "from" survivor space
    _to_space_counters->update_capacity(pad_capacity(_survivor_space_committed));
-    _to_space_counters->update_used(_survivor_space_used);
-    _old_space_counters->update_capacity(pad_capacity(_old_gen_committed));
-    _old_space_counters->update_used(_old_gen_used);
+    _to_space_counters->update_used(survivor_space_used());
+    _old_space_counters->update_capacity(pad_capacity(old_gen_committed()));
+    _old_space_counters->update_used(old_gen_used());

    _young_gen_counters->update_all();
    _old_gen_counters->update_all();

    MetaspaceCounters::update_performance_counters();
    CompressedClassSpaceCounters::update_performance_counters();
  }
}
 
+// Refresh only the eden jstat counters. The capacity update is new in
+// this change: eden committed can change, not just eden used.
void G1MonitoringSupport::update_eden_size() {
-  // Recalculate everything - this should be fast enough and we are sure that we do not
-  // miss anything.
+  // Recalculate everything. Should be fast enough and we are sure not to miss anything.
  recalculate_sizes();
  if (UsePerfData) {
-    _eden_space_counters->update_used(_eden_space_used);
+    _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed));
+    _eden_space_counters->update_used(eden_space_used());
  }
}
 
+// memory_usage() is relocated here from earlier in the file; both
+// snapshots are taken under MonitoringSupport_lock for consistency.
-MemoryUsage G1MonitoringSupport::eden_space_memory_usage(size_t initial_size, size_t max_size) {
+MemoryUsage G1MonitoringSupport::memory_usage() {
  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+  return MemoryUsage(InitialHeapSize, _overall_used, _overall_committed, _g1h->max_capacity());
+}
+
-  return MemoryUsage(initial_size,
-                     _eden_space_used,
-                     _eden_space_committed,
-                     max_size);
+// Eden snapshot: used via the accessor, committed from the cached field.
+MemoryUsage G1MonitoringSupport::eden_space_memory_usage(size_t initial_size, size_t max_size) {
+  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+  return MemoryUsage(initial_size, eden_space_used(), _eden_space_committed, max_size);
}
 
+// Survivor snapshot, taken under MonitoringSupport_lock.
MemoryUsage G1MonitoringSupport::survivor_space_memory_usage(size_t initial_size, size_t max_size) {
  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+  return MemoryUsage(initial_size, survivor_space_used(), _survivor_space_committed, max_size);
+}

-  return MemoryUsage(initial_size,
-                     _survivor_space_used,
-                     _survivor_space_committed,
-                     max_size);
+// Old-space snapshot: the old space only, not the old generation as a
+// whole (see old_gen_used()/old_gen_committed() for the latter).
+MemoryUsage G1MonitoringSupport::old_space_memory_usage(size_t initial_size, size_t max_size) {
+  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+  return MemoryUsage(initial_size, old_space_used(), _old_space_committed, max_size);
}
 
+// old_gen_memory_usage() is replaced by per-space snapshots for the new
+// archive and humongous pools, both taken under MonitoringSupport_lock.
-MemoryUsage G1MonitoringSupport::old_gen_memory_usage(size_t initial_size, size_t max_size) {
+MemoryUsage G1MonitoringSupport::archive_space_memory_usage(size_t initial_size, size_t max_size) {
  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+  return MemoryUsage(initial_size, archive_space_used(), _archive_space_committed, max_size);
+}

-  return MemoryUsage(initial_size,
-                     _old_gen_used,
-                     _old_gen_committed,
-                     max_size);
+MemoryUsage G1MonitoringSupport::humongous_space_memory_usage(size_t initial_size, size_t max_size) {
+  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+  return MemoryUsage(initial_size, humongous_space_used(), _humongous_space_committed, max_size);
+}
+
+// RAII stats recorder for the concurrent-cycle memory manager. A cycle is
+// reported in pieces: CycleStart stamps the begin time and pre-GC usage,
+// Remark/Cleanup only accumulate pause time, and CycleEnd records peak and
+// post-GC usage, stamps the end time, and counts the collection.
+TraceConcMemoryManagerStats::TraceConcMemoryManagerStats(Stage stage, GCCause::Cause cause)
+  : TraceMemoryManagerStats() {
+  GCMemoryManager* manager = G1CollectedHeap::heap()->g1mm()->conc_memory_manager();
+  switch (stage) {
+    // Beginning of the cycle: begin time and pre-GC usage only.
+    case CycleStart:
+      initialize(manager /* GC manager */,
+                 cause   /* cause of the GC */,
+                 true    /* allMemoryPoolsAffected */,
+                 true    /* recordGCBeginTime */,
+                 true    /* recordPreGCUsage */,
+                 false   /* recordPeakUsage */,
+                 false   /* recordPostGCusage */,
+                 false   /* recordAccumulatedGCTime */,
+                 false   /* recordGCEndTime */,
+                 false   /* countCollection */ );
+      break;
+    // Pauses within the cycle: only add their duration to the GC time.
+    case Remark:
+    case Cleanup:
+      initialize(manager /* GC manager */,
+                 cause   /* cause of the GC */,
+                 true    /* allMemoryPoolsAffected */,
+                 false   /* recordGCBeginTime */,
+                 false   /* recordPreGCUsage */,
+                 false   /* recordPeakUsage */,
+                 false   /* recordPostGCusage */,
+                 true    /* recordAccumulatedGCTime */,
+                 false   /* recordGCEndTime */,
+                 false   /* countCollection */ );
+      break;
+    // End of the cycle: close out usage, end time, and collection count.
+    case CycleEnd:
+      initialize(manager /* GC manager */,
+                 cause   /* cause of the GC */,
+                 true    /* allMemoryPoolsAffected */,
+                 false   /* recordGCBeginTime */,
+                 false   /* recordPreGCUsage */,
+                 true    /* recordPeakUsage */,
+                 true    /* recordPostGCusage */,
+                 false   /* recordAccumulatedGCTime */,
+                 true    /* recordGCEndTime */,
+                 true    /* countCollection */ );
+      break;
+    default:
+      ShouldNotReachHere();
+      break;
+  }
+}
 
+// Per-pause scope: picks the collector counters and the memory manager
+// matching the pause kind — full GC -> full manager; otherwise in legacy
+// mode the single incremental manager, and in default mode the mixed or
+// young manager. allMemoryPoolsAffected is false only for a legacy young
+// pause, whose old pool was registered with always_affected_by_gc == false
+// (see initialize_serviceability()); in default mode every pool of the
+// chosen manager is always affected, hence the 'true' arm.
-G1MonitoringScope::G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected) :
+G1MonitoringScope::G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool mixed_gc) :
  _tcs(full_gc ? g1mm->_full_collection_counters : g1mm->_incremental_collection_counters),
-  _tms(full_gc ? &g1mm->_full_gc_memory_manager : &g1mm->_incremental_memory_manager,
-       G1CollectedHeap::heap()->gc_cause(), all_memory_pools_affected) {
+  _tms(full_gc                        ? &g1mm->_full_memory_manager :
+       (g1mm->use_legacy_monitoring() ? &g1mm->_incremental_memory_manager :
+       (mixed_gc                      ? &g1mm->_mixed_memory_manager :
+        /* young */                     &g1mm->_young_memory_manager)),
+       g1mm->_g1h->gc_cause(),
+       full_gc || (g1mm->use_legacy_monitoring() ? mixed_gc : true) /* allMemoryPoolsAffected */) {
}
< prev index next >