/*
 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/g1CollectedHeap.inline.hpp" 27 #include "gc/g1/g1MonitoringSupport.hpp" 28 #include "gc/g1/g1Policy.hpp" 29 #include "gc/g1/g1MemoryPool.hpp" 30 #include "gc/shared/hSpaceCounters.hpp" 31 #include "memory/metaspaceCounters.hpp" 32 #include "services/memoryPool.hpp" 33 34 class G1GenerationCounters : public GenerationCounters { 35 protected: 36 G1MonitoringSupport* _g1mm; 37 38 public: 39 G1GenerationCounters(G1MonitoringSupport* g1mm, 40 const char* name, int ordinal, int spaces, 41 size_t min_capacity, size_t max_capacity, 42 size_t curr_capacity) 43 : GenerationCounters(name, ordinal, spaces, min_capacity, 44 max_capacity, curr_capacity), _g1mm(g1mm) { } 45 }; 46 47 class G1YoungGenerationCounters : public G1GenerationCounters { 48 public: 49 // We pad the capacity three times given that the young generation 50 // contains three spaces (eden and two survivors). 51 G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name, size_t max_size) 52 : G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */, 53 G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */, 54 G1MonitoringSupport::pad_capacity(max_size, 3), 55 G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) { 56 if (UsePerfData) { 57 update_all(); 58 } 59 } 60 61 virtual void update_all() { 62 size_t committed = 63 G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3); 64 _current_size->set_value(committed); 65 } 66 }; 67 68 class G1OldGenerationCounters : public G1GenerationCounters { 69 public: 70 G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name, size_t max_size) 71 : G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */, 72 G1MonitoringSupport::pad_capacity(0) /* min_capacity */, 73 G1MonitoringSupport::pad_capacity(max_size), 74 G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) { 75 if (UsePerfData) { 76 update_all(); 77 } 78 } 79 80 virtual void update_all() 
{ 81 size_t committed = 82 G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed()); 83 _current_size->set_value(committed); 84 } 85 }; 86 87 size_t G1MonitoringSupport::old_gen_committed() { 88 return _old_space_committed + 89 (use_legacy_monitoring() ? 0 : _humongous_space_committed + _archive_space_committed); 90 } 91 92 size_t G1MonitoringSupport::old_gen_used() { 93 return old_space_used() + 94 (use_legacy_monitoring() ? 0 : humongous_space_used() + archive_space_used()); 95 } 96 97 G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) : 98 _g1h(g1h), 99 _use_legacy_monitoring(G1UseLegacyMonitoring), 100 101 _full_memory_manager(G1UseLegacyMonitoring ? "G1 Old Generation" : "G1 Full", "end of major GC"), 102 _incremental_memory_manager("G1 Young Generation", "end of minor GC"), 103 _young_memory_manager("G1 Young", "end of young GC"), 104 _mixed_memory_manager("G1 Mixed", "end of mixed GC"), 105 _conc_memory_manager("G1 Concurrent Cycle", "end of concurrent cycle"), 106 107 _eden_space_pool(NULL), 108 _survivor_space_pool(NULL), 109 _old_space_pool(NULL), 110 _archive_space_pool(NULL), 111 _humongous_space_pool(NULL), 112 113 _incremental_collection_counters(NULL), 114 _full_collection_counters(NULL), 115 _conc_collection_counters(NULL), 116 117 _young_gen_counters(NULL), _old_gen_counters(NULL), 118 119 _old_space_counters(NULL), _eden_space_counters(NULL), 120 _from_space_counters(NULL), _to_space_counters(NULL), 121 122 _overall_committed(0), _overall_used(0), 123 _young_gen_committed(0), 124 _eden_space_committed(0), _eden_space_used(0), 125 _survivor_space_committed(0), _survivor_space_used(0), 126 _old_space_committed(0), _old_space_used(0), 127 _archive_space_committed(0), _archive_space_used(0), 128 _humongous_space_committed(0), _humongous_space_used(0) { 129 130 // Counters for garbage collections. 131 132 // Compute initial capacities. Somewhat random, as they depend 133 // on what's happened so far during JVM initialization. 
134 recalculate_sizes(); 135 136 // name "collector.0". In a generational collector this would be the 137 // young generation collection. 138 _incremental_collection_counters = 139 new CollectorCounters("G1 incremental collections", 0); 140 // name "collector.1". In a generational collector this would be the 141 // old generation collection. 142 _full_collection_counters = 143 new CollectorCounters("G1 stop-the-world full collections", 1); 144 // name "collector.2". In a generational collector this would be the 145 // STW phases in concurrent collection. 146 _conc_collection_counters = 147 new CollectorCounters("G1 stop-the-world phases", 2); 148 149 // "Generation" and "Space" counters. 150 // 151 // name "generation.1" This is logically the old generation in 152 // generational GC terms. The "1, 1" parameters are for 153 // the n-th generation (=1) with 1 space. 154 // Counters are created from minCapacity, maxCapacity, and capacity 155 _old_gen_counters = new G1OldGenerationCounters(this, "old", _g1h->max_capacity()); 156 157 // name "generation.1.space.0" 158 // Counters are created from maxCapacity, capacity, initCapacity, 159 // and used. 160 _old_space_counters = new HSpaceCounters(_old_gen_counters->name_space(), 161 "space", 0 /* ordinal */, 162 pad_capacity(g1h->max_capacity()) /* max_capacity */, 163 pad_capacity(old_gen_committed()) /* init_capacity */); 164 165 // Young collection set 166 // name "generation.0". This is logically the young generation. 167 // The "0, 3" are parameters for the n-th generation (=0) with 3 spaces. 
168 // See _old_collection_counters for additional counters 169 _young_gen_counters = new G1YoungGenerationCounters(this, "young", _g1h->max_capacity()); 170 171 const char* young_collection_name_space = _young_gen_counters->name_space(); 172 173 // name "generation.0.space.0" 174 // See _old_space_counters for additional counters 175 _eden_space_counters = new HSpaceCounters(young_collection_name_space, 176 "eden", 0 /* ordinal */, 177 pad_capacity(g1h->max_capacity()) /* max_capacity */, 178 pad_capacity(_eden_space_committed) /* init_capacity */); 179 180 // name "generation.0.space.1" 181 // See _old_space_counters for additional counters 182 // Set the arguments to indicate that this survivor space is not used. 183 _from_space_counters = new HSpaceCounters(young_collection_name_space, 184 "s0", 1 /* ordinal */, 185 pad_capacity(0) /* max_capacity */, 186 pad_capacity(0) /* init_capacity */); 187 // Given that this survivor space is not used, we update it here 188 // once to reflect that its used space is 0 so that we don't have to 189 // worry about updating it again later. 
190 if (UsePerfData) { 191 _from_space_counters->update_used(0); 192 } 193 194 // name "generation.0.space.2" 195 // See _old_space_counters for additional counters 196 _to_space_counters = new HSpaceCounters(young_collection_name_space, 197 "s1", 2 /* ordinal */, 198 pad_capacity(g1h->max_capacity()) /* max_capacity */, 199 pad_capacity(_survivor_space_committed) /* init_capacity */); 200 } 201 202 G1MonitoringSupport::~G1MonitoringSupport() { 203 delete _eden_space_pool; 204 delete _survivor_space_pool; 205 delete _old_space_pool; 206 delete _archive_space_pool; 207 delete _humongous_space_pool; 208 } 209 210 void G1MonitoringSupport::initialize_serviceability() { 211 _eden_space_pool = new G1EdenPool(_g1h, _eden_space_committed); 212 _survivor_space_pool = new G1SurvivorPool(_g1h, _survivor_space_committed); 213 _old_space_pool = new G1OldPool(_g1h, _old_space_committed, _g1h->max_capacity()); 214 _archive_space_pool = new G1ArchivePool(_g1h, _archive_space_committed); 215 _humongous_space_pool = new G1HumongousPool(_g1h, _humongous_space_committed); 216 217 // Pools must be added to each memory manager in the order specified 218 // below: TestMemoryMXBeansAndPoolsPresence.java expects them so. 219 220 if (use_legacy_monitoring()) { 221 _incremental_memory_manager.add_pool(_eden_space_pool); 222 _incremental_memory_manager.add_pool(_survivor_space_pool); 223 // Incremental GCs can affect the humongous pool, but legacy behavior ignores it. 
224 // _incremental_memory_manager.add_pool(_humongous_space_pool); 225 _incremental_memory_manager.add_pool(_old_space_pool, false /* always_affected_by_gc */); 226 } else { 227 _young_memory_manager.add_pool(_eden_space_pool); 228 _young_memory_manager.add_pool(_survivor_space_pool); 229 _young_memory_manager.add_pool(_humongous_space_pool); 230 231 _mixed_memory_manager.add_pool(_eden_space_pool); 232 _mixed_memory_manager.add_pool(_survivor_space_pool); 233 _mixed_memory_manager.add_pool(_humongous_space_pool); 234 _mixed_memory_manager.add_pool(_old_space_pool); 235 236 _conc_memory_manager.add_pool(_humongous_space_pool); 237 _conc_memory_manager.add_pool(_old_space_pool); 238 } 239 240 _full_memory_manager.add_pool(_eden_space_pool); 241 _full_memory_manager.add_pool(_survivor_space_pool); 242 if (!use_legacy_monitoring()) { 243 _full_memory_manager.add_pool(_humongous_space_pool); 244 _full_memory_manager.add_pool(_archive_space_pool); 245 } 246 _full_memory_manager.add_pool(_old_space_pool); 247 248 // Update pool and jstat counter content 249 update_sizes(); 250 } 251 252 GrowableArray<GCMemoryManager*> G1MonitoringSupport::memory_managers() { 253 GrowableArray<GCMemoryManager*> memory_managers(4); 254 if (use_legacy_monitoring()) { 255 memory_managers.append(&_incremental_memory_manager); 256 } else { 257 memory_managers.append(&_young_memory_manager); 258 memory_managers.append(&_mixed_memory_manager); 259 memory_managers.append(&_conc_memory_manager); 260 } 261 memory_managers.append(&_full_memory_manager); 262 return memory_managers; 263 } 264 265 GrowableArray<MemoryPool*> G1MonitoringSupport::memory_pools() { 266 GrowableArray<MemoryPool*> memory_pools(5); 267 memory_pools.append(_eden_space_pool); 268 memory_pools.append(_survivor_space_pool); 269 memory_pools.append(_old_space_pool); 270 if (!use_legacy_monitoring()) { 271 memory_pools.append(_humongous_space_pool); 272 memory_pools.append(_archive_space_pool); 273 } 274 return memory_pools; 275 } 
276 277 void G1MonitoringSupport::recalculate_sizes() { 278 assert_heap_locked_or_at_safepoint(true); 279 280 MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag); 281 282 // Recalculate all sizes from scratch. 283 284 uint eden_regions_count = _g1h->eden_regions_count(); 285 uint survivor_regions_count = _g1h->survivor_regions_count(); 286 uint young_regions_count = _g1h->young_regions_count(); 287 assert(young_regions_count == eden_regions_count + survivor_regions_count, "invariant"); 288 uint old_regions_count = _g1h->old_regions_count(); 289 uint archive_regions_count = _g1h->archive_regions_count(); 290 uint humongous_regions_count = _g1h->humongous_regions_count(); 291 292 // Max length includes any potential extensions to the young gen 293 // we'll do when the GC locker is active. 294 uint young_regions_count_max = _g1h->g1_policy()->young_list_max_length(); 295 assert(young_regions_count_max >= survivor_regions_count, "invariant"); 296 uint eden_regions_count_max = young_regions_count_max - survivor_regions_count; 297 298 _overall_used = _g1h->used_unlocked(); 299 _eden_space_used = (size_t)eden_regions_count * HeapRegion::GrainBytes; 300 _survivor_space_used = (size_t)survivor_regions_count * HeapRegion::GrainBytes; 301 _archive_space_used = (size_t)archive_regions_count * HeapRegion::GrainBytes; 302 _humongous_space_used = (size_t)humongous_regions_count * HeapRegion::GrainBytes; 303 304 // We separately keep track of the humongous and archive spaces, no 305 // matter which mode we're in. In legacy mode, the old space is the 306 // sum of the old, humongous and archive spaces, but in default mode 307 // it does not include the humongous and archive spaces. The old 308 // generation as a whole (in contrast to the old space), always 309 // includes the humongous and archive spaces. See the definitions of 310 // old_gen_committed() and old_gen_used(). 311 size_t excess_old = use_legacy_monitoring() ? 
0 : _humongous_space_used + _archive_space_used; 312 _old_space_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used + excess_old); 313 314 // First, calculate the committed sizes that can be calculated independently. 315 _survivor_space_committed = _survivor_space_used; 316 _old_space_committed = HeapRegion::align_up_to_region_byte_size(_old_space_used); 317 _archive_space_committed = _archive_space_used; 318 _humongous_space_committed = _humongous_space_used; 319 320 // Next, start with the overall committed size. 321 size_t committed = _overall_committed = _g1h->capacity(); 322 323 // Remove the committed size we have calculated so far (for the 324 // survivor, old, archive, and humongous spaces). 325 assert(committed >= (_survivor_space_committed + _old_space_committed + excess_old), "sanity"); 326 committed -= _survivor_space_committed + _old_space_committed + excess_old; 327 328 // Next, calculate and remove the committed size for the eden. 329 _eden_space_committed = (size_t)eden_regions_count_max * HeapRegion::GrainBytes; 330 // Somewhat defensive: be robust in case there are inaccuracies in 331 // the calculations 332 _eden_space_committed = MIN2(_eden_space_committed, committed); 333 committed -= _eden_space_committed; 334 335 // Finally, give the rest to the old space... 336 _old_space_committed += committed; 337 // ..and calculate the young gen committed. 338 _young_gen_committed = _eden_space_committed + _survivor_space_committed; 339 340 assert(_overall_committed == 341 (_eden_space_committed + _survivor_space_committed + _old_space_committed + excess_old), 342 "the committed sizes should add up"); 343 // Somewhat defensive: cap the eden used size to make sure it 344 // never exceeds the committed size. 
345 _eden_space_used = MIN2(_eden_space_used, _eden_space_committed); 346 347 // _survivor_space_committed and _old_space_committed are calculated in terms of 348 // the corresponding _*_used value, so the next two conditions should hold. 349 assert(_survivor_space_used <= _survivor_space_committed, "post-condition"); 350 assert(_old_space_used <= _old_space_committed, "post-condition"); 351 } 352 353 void G1MonitoringSupport::update_sizes() { 354 recalculate_sizes(); 355 if (UsePerfData) { 356 _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed)); 357 _eden_space_counters->update_used(eden_space_used()); 358 // only the "to" survivor space is active, so we don't need to 359 // update the counters for the "from" survivor space 360 _to_space_counters->update_capacity(pad_capacity(_survivor_space_committed)); 361 _to_space_counters->update_used(survivor_space_used()); 362 _old_space_counters->update_capacity(pad_capacity(old_gen_committed())); 363 _old_space_counters->update_used(old_gen_used()); 364 365 _young_gen_counters->update_all(); 366 _old_gen_counters->update_all(); 367 368 MetaspaceCounters::update_performance_counters(); 369 CompressedClassSpaceCounters::update_performance_counters(); 370 } 371 } 372 373 void G1MonitoringSupport::update_eden_size() { 374 // Recalculate everything. Should be fast enough and we are sure not to miss anything. 
375 recalculate_sizes(); 376 if (UsePerfData) { 377 _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed)); 378 _eden_space_counters->update_used(eden_space_used()); 379 } 380 } 381 382 MemoryUsage G1MonitoringSupport::memory_usage() { 383 MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag); 384 return MemoryUsage(InitialHeapSize, _overall_used, _overall_committed, _g1h->max_capacity()); 385 } 386 387 MemoryUsage G1MonitoringSupport::eden_space_memory_usage(size_t initial_size, size_t max_size) { 388 MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag); 389 return MemoryUsage(initial_size, eden_space_used(), _eden_space_committed, max_size); 390 } 391 392 MemoryUsage G1MonitoringSupport::survivor_space_memory_usage(size_t initial_size, size_t max_size) { 393 MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag); 394 return MemoryUsage(initial_size, survivor_space_used(), _survivor_space_committed, max_size); 395 } 396 397 MemoryUsage G1MonitoringSupport::old_space_memory_usage(size_t initial_size, size_t max_size) { 398 MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag); 399 return MemoryUsage(initial_size, old_space_used(), _old_space_committed, max_size); 400 } 401 402 MemoryUsage G1MonitoringSupport::archive_space_memory_usage(size_t initial_size, size_t max_size) { 403 MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag); 404 return MemoryUsage(initial_size, archive_space_used(), _archive_space_committed, max_size); 405 } 406 407 MemoryUsage G1MonitoringSupport::humongous_space_memory_usage(size_t initial_size, size_t max_size) { 408 MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag); 409 return MemoryUsage(initial_size, humongous_space_used(), _humongous_space_committed, max_size); 410 } 411 412 TraceConcMemoryManagerStats::TraceConcMemoryManagerStats(Stage stage, GCCause::Cause cause) 413 : TraceMemoryManagerStats() { 
414 GCMemoryManager* manager = G1CollectedHeap::heap()->g1mm()->conc_memory_manager(); 415 switch (stage) { 416 case CycleStart: 417 initialize(manager /* GC manager */, 418 cause /* cause of the GC */, 419 true /* allMemoryPoolsAffected */, 420 true /* recordGCBeginTime */, 421 true /* recordPreGCUsage */, 422 false /* recordPeakUsage */, 423 false /* recordPostGCusage */, 424 false /* recordAccumulatedGCTime */, 425 false /* recordGCEndTime */, 426 false /* countCollection */ ); 427 break; 428 case Remark: 429 case Cleanup: 430 initialize(manager /* GC manager */, 431 cause /* cause of the GC */, 432 true /* allMemoryPoolsAffected */, 433 false /* recordGCBeginTime */, 434 false /* recordPreGCUsage */, 435 false /* recordPeakUsage */, 436 false /* recordPostGCusage */, 437 true /* recordAccumulatedGCTime */, 438 false /* recordGCEndTime */, 439 false /* countCollection */ ); 440 break; 441 case CycleEnd: 442 initialize(manager /* GC manager */, 443 cause /* cause of the GC */, 444 true /* allMemoryPoolsAffected */, 445 false /* recordGCBeginTime */, 446 false /* recordPreGCUsage */, 447 true /* recordPeakUsage */, 448 true /* recordPostGCusage */, 449 false /* recordAccumulatedGCTime */, 450 true /* recordGCEndTime */, 451 true /* countCollection */ ); 452 break; 453 default: 454 ShouldNotReachHere(); 455 break; 456 } 457 } 458 459 G1MonitoringScope::G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool mixed_gc) : 460 _tcs(full_gc ? g1mm->_full_collection_counters : g1mm->_incremental_collection_counters), 461 _tms(full_gc ? &g1mm->_full_memory_manager : 462 (g1mm->use_legacy_monitoring() ? &g1mm->_incremental_memory_manager : 463 (mixed_gc ? &g1mm->_mixed_memory_manager : 464 /* young */ &g1mm->_young_memory_manager)), 465 g1mm->_g1h->gc_cause(), 466 full_gc || (g1mm->use_legacy_monitoring() ? mixed_gc : true) /* allMemoryPoolsAffected */) { 467 }