1 /* 2 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/generationSpec.hpp"
#include "memory/heap.hpp"
#include "memory/memRegion.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/javaCalls.hpp"
#include "services/classLoadingService.hpp"
#include "services/lowMemoryDetector.hpp"
#include "services/management.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryService.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "services/g1MemoryPool.hpp"
#include "services/psMemoryPool.hpp"
#endif // INCLUDE_ALL_GCS

// Global lists of all memory pools and all memory managers known to the
// management service; C-heap allocated so they live for the whole VM.
GrowableArray<MemoryPool*>* MemoryService::_pools_list =
  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_pools_list_size, true);
GrowableArray<MemoryManager*>* MemoryService::_managers_list =
  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryManager*>(init_managers_list_size, true);

// Filled in lazily, once the heap / code cache / metaspace pools are
// registered (see set_universe_heap and the add_*_memory_pool functions).
GCMemoryManager* MemoryService::_minor_gc_manager = NULL;
GCMemoryManager* MemoryService::_major_gc_manager = NULL;
MemoryPool* MemoryService::_code_heap_pool = NULL;
MemoryPool* MemoryService::_metaspace_pool = NULL;
MemoryPool* MemoryService::_compressed_class_pool = NULL;

// Thread closure that merely counts the threads it is applied to; used
// below to determine the number of GC threads of the heap.
class GcThreadCountClosure: public ThreadClosure {
 private:
  int _count;
 public:
  GcThreadCountClosure() : _count(0) {};
  void do_thread(Thread* thread);
  int count() { return _count; }
};

void GcThreadCountClosure::do_thread(Thread* thread) {
  _count++;
}

// Called when the universe heap has been created: registers the memory
// managers and pools appropriate for the concrete heap kind, records the
// GC thread count on both managers, and initializes their GC statistics.
void MemoryService::set_universe_heap(CollectedHeap* heap) {
  CollectedHeap::Name kind = heap->kind();
  switch (kind) {
    case CollectedHeap::GenCollectedHeap : {
      add_gen_collected_heap_info(GenCollectedHeap::heap());
      break;
    }
#if INCLUDE_ALL_GCS
    case CollectedHeap::ParallelScavengeHeap : {
      add_parallel_scavenge_heap_info(ParallelScavengeHeap::heap());
      break;
    }
    case CollectedHeap::G1CollectedHeap : {
      add_g1_heap_info(G1CollectedHeap::heap());
      break;
    }
#endif // INCLUDE_ALL_GCS
    default: {
      guarantee(false, "Unrecognized kind of heap");
    }
  }

  // set the GC thread count
  GcThreadCountClosure gctcc;
  heap->gc_threads_do(&gctcc);
  int count = gctcc.count();
  if (count > 0) {
    _minor_gc_manager->set_num_gc_threads(count);
    _major_gc_manager->set_num_gc_threads(count);
  }

  // All memory pools and memory managers are initialized.
  //
  _minor_gc_manager->initialize_gc_stat_info();
  _major_gc_manager->initialize_gc_stat_info();
}

// Add memory pools for GenCollectedHeap
// This function currently only supports two generations collected heap.
// The collector for GenCollectedHeap will have two memory managers.
void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
  CollectorPolicy* policy = heap->collector_policy();

  assert(policy->is_generation_policy(), "Only support two generations");
  guarantee(heap->n_gens() == 2, "Only support two-generation heap");

  GenCollectorPolicy* gen_policy = policy->as_generation_policy();
  if (gen_policy != NULL) {
    // The minor GC manager is chosen from the spec of the young (index 0)
    // generation: DefNew => Copy manager, ParNew => ParNew manager.
    GenerationSpec** specs = gen_policy->generations();
    Generation::Name kind = specs[0]->name();
    switch (kind) {
      case Generation::DefNew:
        _minor_gc_manager = MemoryManager::get_copy_memory_manager();
        break;
#if INCLUDE_ALL_GCS
      case Generation::ParNew:
        _minor_gc_manager = MemoryManager::get_parnew_memory_manager();
        break;
#endif // INCLUDE_ALL_GCS
      default:
        guarantee(false, "Unrecognized generation spec");
        break;
    }
    // The major GC manager is chosen from the collector policy:
    // mark-sweep-compact => MSC manager, CMS => CMS manager.
    if (policy->is_mark_sweep_policy()) {
      _major_gc_manager = MemoryManager::get_msc_memory_manager();
#if INCLUDE_ALL_GCS
    } else if (policy->is_concurrent_mark_sweep_policy()) {
      _major_gc_manager = MemoryManager::get_cms_memory_manager();
#endif // INCLUDE_ALL_GCS
    } else {
      guarantee(false, "Unknown two-gen policy");
    }
  } else {
    guarantee(false, "Non two-gen policy");
  }
  _managers_list->append(_minor_gc_manager);
  _managers_list->append(_major_gc_manager);

  // The young generation's pools are managed by both managers; the old
  // generation's pool only by the major manager.
  add_generation_memory_pool(heap->get_gen(minor), _major_gc_manager, _minor_gc_manager);
  add_generation_memory_pool(heap->get_gen(major), _major_gc_manager);
}

#if INCLUDE_ALL_GCS
// Add memory pools for ParallelScavengeHeap
// This function currently only supports two generations collected heap.
// The collector for ParallelScavengeHeap will have two memory managers.
void MemoryService::add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap) {
  // Two managers to keep statistics about _minor_gc_manager and _major_gc_manager GC.
  _minor_gc_manager = MemoryManager::get_psScavenge_memory_manager();
  _major_gc_manager = MemoryManager::get_psMarkSweep_memory_manager();
  _managers_list->append(_minor_gc_manager);
  _managers_list->append(_major_gc_manager);

  add_psYoung_memory_pool(heap->young_gen(), _major_gc_manager, _minor_gc_manager);
  add_psOld_memory_pool(heap->old_gen(), _major_gc_manager);
}

// Add the two G1 memory managers and the pools for the G1 heap.
void MemoryService::add_g1_heap_info(G1CollectedHeap* g1h) {
  assert(UseG1GC, "sanity");

  _minor_gc_manager = MemoryManager::get_g1YoungGen_memory_manager();
  _major_gc_manager = MemoryManager::get_g1OldGen_memory_manager();
  _managers_list->append(_minor_gc_manager);
  _managers_list->append(_major_gc_manager);

  add_g1YoungGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager);
  add_g1OldGen_memory_pool(g1h, _major_gc_manager);
}
#endif // INCLUDE_ALL_GCS

// Create a pool covering an entire generation, register it in
// _pools_list and return it.
MemoryPool* MemoryService::add_gen(Generation* gen,
                                   const char* name,
                                   bool is_heap,
                                   bool support_usage_threshold) {

  MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
  GenerationPool* pool = new GenerationPool(gen, name, type, support_usage_threshold);
  _pools_list->append(pool);
  return (MemoryPool*) pool;
}

// Create a pool for a single contiguous space (e.g. eden), register it
// in _pools_list and return it.
MemoryPool* MemoryService::add_space(ContiguousSpace* space,
                                     const char* name,
                                     bool is_heap,
                                     size_t max_size,
                                     bool support_usage_threshold) {
  MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
  ContiguousSpacePool* pool = new ContiguousSpacePool(space, name, type, max_size, support_usage_threshold);

  _pools_list->append(pool);
  return (MemoryPool*) pool;
}

// Create one pool representing the survivor spaces of a DefNew (or
// ParNew) generation, register it in _pools_list and return it.
MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* gen,
                                               const char* name,
                                               bool is_heap,
                                               size_t max_size,
                                               bool support_usage_threshold) {
  MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
  SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(gen, name, type, max_size, support_usage_threshold);

  _pools_list->append(pool);
  return (MemoryPool*) pool;
}

#if INCLUDE_ALL_GCS
// Create a pool for the CMS old generation space, register it in
// _pools_list and return it.
MemoryPool* MemoryService::add_cms_space(CompactibleFreeListSpace* space,
                                         const char* name,
                                         bool is_heap,
                                         size_t max_size,
                                         bool support_usage_threshold) {
  MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
  CompactibleFreeListSpacePool* pool = new CompactibleFreeListSpacePool(space, name, type, max_size, support_usage_threshold);
  _pools_list->append(pool);
  return (MemoryPool*) pool;
}
#endif // INCLUDE_ALL_GCS

// Add memory pool(s) for one generation
void MemoryService::add_generation_memory_pool(Generation* gen,
                                               MemoryManager* major_mgr,
                                               MemoryManager* minor_mgr) {
  guarantee(gen != NULL, "No generation for memory pool");
  Generation::Name kind = gen->kind();
  // Remember where this generation's pools start in _pools_list, so
  // they can be linked to the managers after the switch below.
  int index = _pools_list->length();

  switch (kind) {
    case Generation::DefNew: {
      assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
      DefNewGeneration* young_gen = (DefNewGeneration*) gen;
      // Add a memory pool for each space and young gen doesn't
      // support low memory detection as it is expected to get filled up.
      MemoryPool* eden = add_space(young_gen->eden(),
                                   "Eden Space",
                                   true, /* is_heap */
                                   young_gen->max_eden_size(),
                                   false /* support_usage_threshold */);
      MemoryPool* survivor = add_survivor_spaces(young_gen,
                                                 "Survivor Space",
                                                 true, /* is_heap */
                                                 young_gen->max_survivor_size(),
                                                 false /* support_usage_threshold */);
      break;
    }

#if INCLUDE_ALL_GCS
    case Generation::ParNew:
    {
      assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
      // Add a memory pool for each space and young gen doesn't
      // support low memory detection as it is expected to get filled up.
      ParNewGeneration* parnew_gen = (ParNewGeneration*) gen;
      MemoryPool* eden = add_space(parnew_gen->eden(),
                                   "Par Eden Space",
                                   true /* is_heap */,
                                   parnew_gen->max_eden_size(),
                                   false /* support_usage_threshold */);
      MemoryPool* survivor = add_survivor_spaces(parnew_gen,
                                                 "Par Survivor Space",
                                                 true, /* is_heap */
                                                 parnew_gen->max_survivor_size(),
                                                 false /* support_usage_threshold */);

      break;
    }
#endif // INCLUDE_ALL_GCS

    case Generation::MarkSweepCompact: {
      assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
      add_gen(gen,
              "Tenured Gen",
              true, /* is_heap */
              true /* support_usage_threshold */);
      break;
    }

#if INCLUDE_ALL_GCS
    case Generation::ConcurrentMarkSweep:
    {
      assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
      ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*) gen;
      MemoryPool* pool = add_cms_space(cms->cmsSpace(),
                                       "CMS Old Gen",
                                       true, /* is_heap */
                                       cms->reserved().byte_size(),
                                       true /* support_usage_threshold */);
      break;
    }
#endif // INCLUDE_ALL_GCS

    default:
      assert(false, "should not reach here");
      // no memory pool added for others
      break;
  }

  assert(major_mgr != NULL, "Should have at least one manager");
  // Link managers and the memory pools together
  for (int i = index; i < _pools_list->length(); i++) {
    MemoryPool* pool = _pools_list->at(i);
    major_mgr->add_pool(pool);
    if (minor_mgr != NULL) {
      minor_mgr->add_pool(pool);
    }
  }
}


#if INCLUDE_ALL_GCS
// Add eden and survivor pools for the ParallelScavenge young gen; both
// pools are linked to both GC managers.
void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
  assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");

  // Add a memory pool for each space and young gen doesn't
  // support low memory detection as it is expected to get filled up.
  EdenMutableSpacePool* eden = new EdenMutableSpacePool(gen,
                                                        gen->eden_space(),
                                                        "PS Eden Space",
                                                        MemoryPool::Heap,
                                                        false /* support_usage_threshold */);

  SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(gen,
                                                                    "PS Survivor Space",
                                                                    MemoryPool::Heap,
                                                                    false /* support_usage_threshold */);

  major_mgr->add_pool(eden);
  major_mgr->add_pool(survivor);
  minor_mgr->add_pool(eden);
  minor_mgr->add_pool(survivor);
  _pools_list->append(eden);
  _pools_list->append(survivor);
}

// Add the ParallelScavenge old-gen pool, linked to the given (major)
// manager only.
void MemoryService::add_psOld_memory_pool(PSOldGen* gen, MemoryManager* mgr) {
  PSGenerationPool* old_gen = new PSGenerationPool(gen,
                                                   "PS Old Gen",
                                                   MemoryPool::Heap,
                                                   true /* support_usage_threshold */);
  mgr->add_pool(old_gen);
  _pools_list->append(old_gen);
}

// Add G1 eden and survivor pools; both are linked to both GC managers.
void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
                                               MemoryManager* major_mgr,
                                               MemoryManager* minor_mgr) {
  assert(major_mgr != NULL && minor_mgr != NULL, "should have two managers");

  G1EdenPool* eden = new G1EdenPool(g1h);
  G1SurvivorPool* survivor = new G1SurvivorPool(g1h);

  major_mgr->add_pool(eden);
  major_mgr->add_pool(survivor);
  minor_mgr->add_pool(eden);
  minor_mgr->add_pool(survivor);
  _pools_list->append(eden);
  _pools_list->append(survivor);
}

// Add the G1 old-gen pool, linked to the given (major) manager only.
void MemoryService::add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
                                             MemoryManager* mgr) {
  assert(mgr != NULL, "should have one manager");

  G1OldGenPool* old_gen = new G1OldGenPool(g1h);
  mgr->add_pool(old_gen);
  _pools_list->append(old_gen);
}
#endif // INCLUDE_ALL_GCS

// Register the "Code Cache" pool together with the code cache memory
// manager.
void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
  _code_heap_pool = new CodeHeapPool(heap,
                                     "Code Cache",
                                     true /* support_usage_threshold */);
  MemoryManager* mgr = MemoryManager::get_code_cache_memory_manager();
  mgr->add_pool(_code_heap_pool);

  _pools_list->append(_code_heap_pool);
  _managers_list->append(mgr);
}

// Register the Metaspace pool -- and, when compressed class pointers are
// in use, the compressed class space pool -- under the Metaspace manager.
void MemoryService::add_metaspace_memory_pools() {
  MemoryManager* mgr = MemoryManager::get_metaspace_memory_manager();

  _metaspace_pool = new MetaspacePool();
  mgr->add_pool(_metaspace_pool);
  _pools_list->append(_metaspace_pool);

  if (UseCompressedClassPointers) {
    _compressed_class_pool = new CompressedKlassSpacePool();
    mgr->add_pool(_compressed_class_pool);
    _pools_list->append(_compressed_class_pool);
  }

  _managers_list->append(mgr);
}

// Linear search for the manager matching the given handle; returns NULL
// if none matches.
MemoryManager* MemoryService::get_memory_manager(instanceHandle mh) {
  for (int i = 0; i < _managers_list->length(); i++) {
    MemoryManager* mgr = _managers_list->at(i);
    if (mgr->is_manager(mh)) {
      return mgr;
    }
  }
  return NULL;
}

// Linear search for the pool matching the given handle; returns NULL
// if none matches.
MemoryPool* MemoryService::get_memory_pool(instanceHandle ph) {
  for (int i = 0; i < _pools_list->length(); i++) {
    MemoryPool* pool = _pools_list->at(i);
    if (pool->is_pool(ph)) {
      return pool;
    }
  }
  return NULL;
}

// Record peak usage for every registered pool, then run low-memory
// detection across all pools.
void MemoryService::track_memory_usage() {
  // Track the peak memory usage
  for (int i = 0; i < _pools_list->length(); i++) {
    MemoryPool* pool = _pools_list->at(i);
    pool->record_peak_memory_usage();
  }

  // Detect low memory
  LowMemoryDetector::detect_low_memory();
}

// Record peak usage for one pool and, if low-memory detection is
// enabled for it, run detection on that pool only.
void MemoryService::track_memory_pool_usage(MemoryPool* pool) {
  // Track the peak memory usage
  pool->record_peak_memory_usage();

  // Detect low memory
  if (LowMemoryDetector::is_enabled(pool)) {
    LowMemoryDetector::detect_low_memory(pool);
  }
}

// Notify the appropriate GC memory manager that a collection is
// beginning; fullGC selects the major manager. The record* flags control
// which statistics are captured.
void MemoryService::gc_begin(bool fullGC, bool recordGCBeginTime,
                             bool recordAccumulatedGCTime,
                             bool recordPreGCUsage, bool recordPeakUsage) {

  GCMemoryManager* mgr;
  if (fullGC) {
    mgr = _major_gc_manager;
  } else {
    mgr = _minor_gc_manager;
  }
  assert(mgr->is_gc_memory_manager(), "Sanity check");
  mgr->gc_begin(recordGCBeginTime, recordPreGCUsage, recordAccumulatedGCTime);

  // Track the peak memory usage when GC begins
  if (recordPeakUsage) {
    for (int i = 0; i < _pools_list->length(); i++) {
      MemoryPool* pool = _pools_list->at(i);
      pool->record_peak_memory_usage();
    }
  }
}

// Notify the appropriate GC memory manager that a collection has ended;
// must pair with a preceding gc_begin with the same fullGC value.
void MemoryService::gc_end(bool fullGC, bool recordPostGCUsage,
                           bool recordAccumulatedGCTime,
                           bool recordGCEndTime, bool countCollection,
                           GCCause::Cause cause) {

  GCMemoryManager* mgr;
  if (fullGC) {
    mgr = (GCMemoryManager*) _major_gc_manager;
  } else {
    mgr = (GCMemoryManager*) _minor_gc_manager;
  }
  assert(mgr->is_gc_memory_manager(), "Sanity check");

  // register the GC end statistics and memory usage
  mgr->gc_end(recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime,
              countCollection, cause);
}

// Apply the closure to the oops held by every pool and every manager.
void MemoryService::oops_do(OopClosure* f) {
  int i;

  for (i = 0; i < _pools_list->length(); i++) {
    MemoryPool* pool = _pools_list->at(i);
    pool->oops_do(f);
  }
  for (i = 0; i < _managers_list->length(); i++) {
    MemoryManager* mgr = _managers_list->at(i);
    mgr->oops_do(f);
  }
}

// Set the PrintGC flag through the management interface; returns the
// previous value of the flag.
bool MemoryService::set_verbose(bool verbose) {
  MutexLocker m(Management_lock);
  // verbose will be set to the previous value
  bool succeed = CommandLineFlags::boolAtPut((char*)"PrintGC", &verbose, Flag::MANAGEMENT);
  assert(succeed, "Setting PrintGC flag fails");
  ClassLoadingService::reset_trace_class_unloading();

  return verbose;
}

// Allocate a java.lang.management.MemoryUsage object and initialize it
// via its (long, long, long, long) constructor from the given usage.
// May throw; callers use the returned Handle.
Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) {
  Klass* k = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);
  instanceKlassHandle ik(THREAD, k);

  instanceHandle obj = ik->allocate_instance_handle(CHECK_NH);

  JavaValue result(T_VOID);
  JavaCallArguments args(10);
  args.push_oop(obj);                         // receiver
  args.push_long(usage.init_size_as_jlong()); // Argument 1
  args.push_long(usage.used_as_jlong());      // Argument 2
  args.push_long(usage.committed_as_jlong()); // Argument 3
  args.push_long(usage.max_size_as_jlong());  // Argument 4

  JavaCalls::call_special(&result,
                          ik,
                          vmSymbols::object_initializer_name(),
                          vmSymbols::long_long_long_long_void_signature(),
                          &args,
                          CHECK_NH);
  return obj;
}
//
// GC manager type depends on the type of Generation. Depending on the space
// availability and vm options the gc uses major gc manager or minor gc
// manager or both. The type of gc manager depends on the generation kind.
// For DefNew and ParNew generation doing scavenge gc uses minor gc manager (so
// _fullGC is set to false ) and for other generation kinds doing
// mark-sweep-compact uses major gc manager (so _fullGC is set to true).
551 TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) { 552 switch (kind) { 553 case Generation::DefNew: 554 #if INCLUDE_ALL_GCS 555 case Generation::ParNew: 556 #endif // INCLUDE_ALL_GCS 557 _fullGC=false; 558 break; 559 case Generation::MarkSweepCompact: 560 #if INCLUDE_ALL_GCS 561 case Generation::ConcurrentMarkSweep: 562 #endif // INCLUDE_ALL_GCS 563 _fullGC=true; 564 break; 565 default: 566 assert(false, "Unrecognized gc generation kind."); 567 } 568 // this has to be called in a stop the world pause and represent 569 // an entire gc pause, start to finish: 570 initialize(_fullGC, cause,true, true, true, true, true, true, true); 571 } 572 TraceMemoryManagerStats::TraceMemoryManagerStats(bool fullGC, 573 GCCause::Cause cause, 574 bool recordGCBeginTime, 575 bool recordPreGCUsage, 576 bool recordPeakUsage, 577 bool recordPostGCUsage, 578 bool recordAccumulatedGCTime, 579 bool recordGCEndTime, 580 bool countCollection) { 581 initialize(fullGC, cause, recordGCBeginTime, recordPreGCUsage, recordPeakUsage, 582 recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime, 583 countCollection); 584 } 585 586 // for a subclass to create then initialize an instance before invoking 587 // the MemoryService 588 void TraceMemoryManagerStats::initialize(bool fullGC, 589 GCCause::Cause cause, 590 bool recordGCBeginTime, 591 bool recordPreGCUsage, 592 bool recordPeakUsage, 593 bool recordPostGCUsage, 594 bool recordAccumulatedGCTime, 595 bool recordGCEndTime, 596 bool countCollection) { 597 _fullGC = fullGC; 598 _recordGCBeginTime = recordGCBeginTime; 599 _recordPreGCUsage = recordPreGCUsage; 600 _recordPeakUsage = recordPeakUsage; 601 _recordPostGCUsage = recordPostGCUsage; 602 _recordAccumulatedGCTime = recordAccumulatedGCTime; 603 _recordGCEndTime = recordGCEndTime; 604 _countCollection = countCollection; 605 _cause = cause; 606 607 MemoryService::gc_begin(_fullGC, _recordGCBeginTime, _recordAccumulatedGCTime, 608 
_recordPreGCUsage, _recordPeakUsage); 609 } 610 611 TraceMemoryManagerStats::~TraceMemoryManagerStats() { 612 MemoryService::gc_end(_fullGC, _recordPostGCUsage, _recordAccumulatedGCTime, 613 _recordGCEndTime, _countCollection, _cause); 614 }