1 /*
2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "gc/parallel/adjoiningGenerations.hpp"
28 #include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
29 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
30 #include "gc/parallel/parallelArguments.hpp"
31 #include "gc/parallel/objectStartArray.inline.hpp"
32 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
33 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
34 #include "gc/parallel/psMarkSweepProxy.hpp"
35 #include "gc/parallel/psMemoryPool.hpp"
36 #include "gc/parallel/psParallelCompact.inline.hpp"
37 #include "gc/parallel/psPromotionManager.hpp"
38 #include "gc/parallel/psScavenge.hpp"
39 #include "gc/parallel/psVMOperations.hpp"
40 #include "gc/shared/gcHeapSummary.hpp"
41 #include "gc/shared/gcLocker.hpp"
42 #include "gc/shared/gcWhen.hpp"
43 #include "gc/shared/genArguments.hpp"
44 #include "gc/shared/locationPrinter.inline.hpp"
45 #include "gc/shared/scavengableNMethods.hpp"
46 #include "logging/log.hpp"
47 #include "memory/iterator.hpp"
48 #include "memory/metaspaceCounters.hpp"
49 #include "memory/universe.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/vmThread.hpp"
54 #include "services/memoryManager.hpp"
99 const size_t old_capacity = _old_gen->capacity_in_bytes();
100 const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
101 _size_policy =
102 new PSAdaptiveSizePolicy(eden_capacity,
103 initial_promo_size,
104 young_gen()->to_space()->capacity_in_bytes(),
105 GenAlignment,
106 max_gc_pause_sec,
107 max_gc_minor_pause_sec,
108 GCTimeRatio
109 );
110
111 assert(ParallelArguments::is_heterogeneous_heap() || !UseAdaptiveGCBoundary ||
112 (old_gen()->virtual_space()->high_boundary() ==
113 young_gen()->virtual_space()->low_boundary()),
114 "Boundaries must meet");
115 // initialize the policy counters - 2 collectors, 2 generations
116 _gc_policy_counters =
117 new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
118
119 if (UseParallelOldGC && !PSParallelCompact::initialize()) {
120 return JNI_ENOMEM;
121 }
122
123 // Set up WorkGang
124 _workers.initialize_workers();
125
126 return JNI_OK;
127 }
128
129 void ParallelScavengeHeap::initialize_serviceability() {
130
131 _eden_pool = new EdenMutableSpacePool(_young_gen,
132 _young_gen->eden_space(),
133 "PS Eden Space",
134 false /* support_usage_threshold */);
135
136 _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
137 "PS Survivor Space",
138 false /* support_usage_threshold */);
139
148 _old_manager->add_pool(_survivor_pool);
149 _old_manager->add_pool(_old_pool);
150
151 _young_manager->add_pool(_eden_pool);
152 _young_manager->add_pool(_survivor_pool);
153
154 }
155
// Predicate handed to ScavengableNMethods: an object is "scavengable"
// (i.e. an nmethod holding a reference to it must be visited by the
// young-gen scavenger) iff it currently lives in the young generation.
class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};
161
162 static PSIsScavengable _is_scavengable;
163
// Second-phase initialization, run after the heap itself is set up.
// Order matters: the shared CollectedHeap hook runs first, then the
// collector-specific modules are initialized.
void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  // Finish setting up whichever full-collection implementation was selected.
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweepProxy::initialize();
  }
  PSPromotionManager::initialize();

  // Register the predicate that identifies nmethods with oops into the
  // young gen (see PSIsScavengable).
  ScavengableNMethods::initialize(&_is_scavengable);
}
177
// Refresh the performance counters published for external monitoring
// (jstat and friends): both generations plus the metaspace counters.
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}
184
185 size_t ParallelScavengeHeap::capacity() const {
186 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
187 return value;
188 }
189
190 size_t ParallelScavengeHeap::used() const {
191 size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
192 return value;
397 HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
398 if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
399 // Size is too big for eden, or gc is locked out.
400 return old_gen()->allocate(size);
401 }
402
403 // If a "death march" is in progress, allocate from the old gen a limited
404 // number of times before doing a GC.
405 if (_death_march_count > 0) {
406 if (_death_march_count < 64) {
407 ++_death_march_count;
408 return old_gen()->allocate(size);
409 } else {
410 _death_march_count = 0;
411 }
412 }
413 return NULL;
414 }
415
// Perform a stop-the-world full collection with the configured old-gen
// collector (parallel compact or serial mark-sweep).
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweepProxy::invoke(clear_all_soft_refs);
  }
}
427
428 // Failed allocation policy. Must be called from the VM thread, and
429 // only at a safepoint! Note that this method has policy for allocation
430 // flow, and NOT collection policy. So we do not check for gc collection
431 // time over limit here, that is the responsibility of the heap specific
432 // collection methods. This method decides where to attempt allocations,
433 // and when to attempt collections, but no collection specific policy.
434 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
435 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
436 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
437 assert(!is_gc_active(), "not reentrant");
438 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
439
440 // We assume that allocation in eden will fail unless we collect.
441
442 // First level allocation failure, scavenge and allocate in young gen.
443 GCCauseSetter gccs(this, GCCause::_allocation_failure);
444 const bool invoked_full_gc = PSScavenge::invoke();
445 HeapWord* result = young_gen()->allocate(size);
537 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
538 if (young_gen()->is_in_reserved(addr)) {
539 assert(young_gen()->is_in(addr),
540 "addr should be in allocated part of young gen");
541 // called from os::print_location by find or VMError
542 if (Debugging || VMError::fatal_error_in_progress()) return NULL;
543 Unimplemented();
544 } else if (old_gen()->is_in_reserved(addr)) {
545 assert(old_gen()->is_in(addr),
546 "addr should be in allocated part of old gen");
547 return old_gen()->start_array()->object_start((HeapWord*)addr);
548 }
549 return 0;
550 }
551
// An address is the start of an object iff block_start() maps it to itself.
bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}
555
556 jlong ParallelScavengeHeap::millis_since_last_gc() {
557 return UseParallelOldGC ?
558 PSParallelCompact::millis_since_last_gc() :
559 PSMarkSweepProxy::millis_since_last_gc();
560 }
561
// Make the heap parsable before verification walks it.
void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}
565
566 PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
567 PSOldGen* old = old_gen();
568 HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
569 VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
570 SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
571
572 PSYoungGen* young = young_gen();
573 VirtualSpaceSummary young_summary(young->reserved().start(),
574 (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
575
576 MutableSpace* eden = young_gen()->eden_space();
577 SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
578
579 MutableSpace* from = young_gen()->from_space();
582 MutableSpace* to = young_gen()->to_space();
583 SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
584
585 VirtualSpaceSummary heap_summary = create_heap_space_summary();
586 return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
587 }
588
// Describe the given address for debugging aids (e.g. the debug.cpp
// "find" command and hs_err reporting); returns whether it was printed.
bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}
592
// Print a summary of the heap: both generations, then metaspace.
void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}
598
// Extended heap printing used during error (hs_err) reporting; adds
// parallel-compact state when the parallel old-gen collector is in use.
void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}
607
// Apply the closure to every GC worker thread.
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}
611
// Print the GC worker threads (used by thread-listing diagnostics).
void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  ParallelScavengeHeap::heap()->workers().print_worker_threads_on(st);
}
615
// At VM exit: print adaptive size policy output and log the accumulated
// young/old GC times (gc+heap+exit debug logging).
void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
}
622
623 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
624 const PSYoungGen* const young = young_gen();
625 const MutableSpace* const eden = young->eden_space();
626 const MutableSpace* const from = young->from_space();
627 const PSOldGen* const old = old_gen();
628
629 return PreGenGCValues(young->used_in_bytes(),
630 young->capacity_in_bytes(),
631 eden->used_in_bytes(),
632 eden->capacity_in_bytes(),
633 from->used_in_bytes(),
634 from->capacity_in_bytes(),
635 old->used_in_bytes(),
636 old->capacity_in_bytes());
637 }
638
639 void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
640 const PSYoungGen* const young = young_gen();
|
1 /*
2 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "gc/parallel/adjoiningGenerations.hpp"
28 #include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
29 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
30 #include "gc/parallel/parallelArguments.hpp"
31 #include "gc/parallel/objectStartArray.inline.hpp"
32 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
33 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
34 #include "gc/parallel/psMemoryPool.hpp"
35 #include "gc/parallel/psParallelCompact.inline.hpp"
36 #include "gc/parallel/psPromotionManager.hpp"
37 #include "gc/parallel/psScavenge.hpp"
38 #include "gc/parallel/psVMOperations.hpp"
39 #include "gc/shared/gcHeapSummary.hpp"
40 #include "gc/shared/gcLocker.hpp"
41 #include "gc/shared/gcWhen.hpp"
42 #include "gc/shared/genArguments.hpp"
43 #include "gc/shared/locationPrinter.inline.hpp"
44 #include "gc/shared/scavengableNMethods.hpp"
45 #include "logging/log.hpp"
46 #include "memory/iterator.hpp"
47 #include "memory/metaspaceCounters.hpp"
48 #include "memory/universe.hpp"
49 #include "oops/oop.inline.hpp"
50 #include "runtime/handles.inline.hpp"
51 #include "runtime/java.hpp"
52 #include "runtime/vmThread.hpp"
53 #include "services/memoryManager.hpp"
98 const size_t old_capacity = _old_gen->capacity_in_bytes();
99 const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
100 _size_policy =
101 new PSAdaptiveSizePolicy(eden_capacity,
102 initial_promo_size,
103 young_gen()->to_space()->capacity_in_bytes(),
104 GenAlignment,
105 max_gc_pause_sec,
106 max_gc_minor_pause_sec,
107 GCTimeRatio
108 );
109
110 assert(ParallelArguments::is_heterogeneous_heap() || !UseAdaptiveGCBoundary ||
111 (old_gen()->virtual_space()->high_boundary() ==
112 young_gen()->virtual_space()->low_boundary()),
113 "Boundaries must meet");
114 // initialize the policy counters - 2 collectors, 2 generations
115 _gc_policy_counters =
116 new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
117
118 if (!PSParallelCompact::initialize()) {
119 return JNI_ENOMEM;
120 }
121
122 // Set up WorkGang
123 _workers.initialize_workers();
124
125 return JNI_OK;
126 }
127
128 void ParallelScavengeHeap::initialize_serviceability() {
129
130 _eden_pool = new EdenMutableSpacePool(_young_gen,
131 _young_gen->eden_space(),
132 "PS Eden Space",
133 false /* support_usage_threshold */);
134
135 _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
136 "PS Survivor Space",
137 false /* support_usage_threshold */);
138
147 _old_manager->add_pool(_survivor_pool);
148 _old_manager->add_pool(_old_pool);
149
150 _young_manager->add_pool(_eden_pool);
151 _young_manager->add_pool(_survivor_pool);
152
153 }
154
// Predicate handed to ScavengableNMethods: an object is "scavengable"
// (i.e. an nmethod holding a reference to it must be visited by the
// young-gen scavenger) iff it currently lives in the young generation.
class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};
160
161 static PSIsScavengable _is_scavengable;
162
// Second-phase initialization, run after the heap itself is set up.
// Order matters: the shared CollectedHeap hook runs first, then the
// collector-specific modules are initialized.
void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  PSParallelCompact::post_initialize();
  PSPromotionManager::initialize();

  // Register the predicate that identifies nmethods with oops into the
  // young gen (see PSIsScavengable).
  ScavengableNMethods::initialize(&_is_scavengable);
}
172
// Refresh the performance counters published for external monitoring
// (jstat and friends): both generations plus the metaspace counters.
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}
179
180 size_t ParallelScavengeHeap::capacity() const {
181 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
182 return value;
183 }
184
185 size_t ParallelScavengeHeap::used() const {
186 size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
187 return value;
392 HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
393 if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
394 // Size is too big for eden, or gc is locked out.
395 return old_gen()->allocate(size);
396 }
397
398 // If a "death march" is in progress, allocate from the old gen a limited
399 // number of times before doing a GC.
400 if (_death_march_count > 0) {
401 if (_death_march_count < 64) {
402 ++_death_march_count;
403 return old_gen()->allocate(size);
404 } else {
405 _death_march_count = 0;
406 }
407 }
408 return NULL;
409 }
410
// Perform a stop-the-world full collection with the parallel compactor.
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // The do_full_collection() parameter clear_all_soft_refs
  // is interpreted here as maximum_compaction which will
  // cause SoftRefs to be cleared.
  bool maximum_compaction = clear_all_soft_refs;
  PSParallelCompact::invoke(maximum_compaction);
}
418
419 // Failed allocation policy. Must be called from the VM thread, and
420 // only at a safepoint! Note that this method has policy for allocation
421 // flow, and NOT collection policy. So we do not check for gc collection
422 // time over limit here, that is the responsibility of the heap specific
423 // collection methods. This method decides where to attempt allocations,
424 // and when to attempt collections, but no collection specific policy.
425 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
426 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
427 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
428 assert(!is_gc_active(), "not reentrant");
429 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
430
431 // We assume that allocation in eden will fail unless we collect.
432
433 // First level allocation failure, scavenge and allocate in young gen.
434 GCCauseSetter gccs(this, GCCause::_allocation_failure);
435 const bool invoked_full_gc = PSScavenge::invoke();
436 HeapWord* result = young_gen()->allocate(size);
528 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
529 if (young_gen()->is_in_reserved(addr)) {
530 assert(young_gen()->is_in(addr),
531 "addr should be in allocated part of young gen");
532 // called from os::print_location by find or VMError
533 if (Debugging || VMError::fatal_error_in_progress()) return NULL;
534 Unimplemented();
535 } else if (old_gen()->is_in_reserved(addr)) {
536 assert(old_gen()->is_in(addr),
537 "addr should be in allocated part of old gen");
538 return old_gen()->start_array()->object_start((HeapWord*)addr);
539 }
540 return 0;
541 }
542
// An address is the start of an object iff block_start() maps it to itself.
bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}
546
// Milliseconds since the last full GC, as tracked by the parallel compactor.
jlong ParallelScavengeHeap::millis_since_last_gc() {
  return PSParallelCompact::millis_since_last_gc();
}
550
// Make the heap parsable before verification walks it.
void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}
554
555 PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
556 PSOldGen* old = old_gen();
557 HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
558 VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
559 SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
560
561 PSYoungGen* young = young_gen();
562 VirtualSpaceSummary young_summary(young->reserved().start(),
563 (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
564
565 MutableSpace* eden = young_gen()->eden_space();
566 SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
567
568 MutableSpace* from = young_gen()->from_space();
571 MutableSpace* to = young_gen()->to_space();
572 SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
573
574 VirtualSpaceSummary heap_summary = create_heap_space_summary();
575 return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
576 }
577
// Describe the given address for debugging aids (e.g. the debug.cpp
// "find" command and hs_err reporting); returns whether it was printed.
bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}
581
// Print a summary of the heap: both generations, then metaspace.
void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}
587
// Extended heap printing used during error (hs_err) reporting; adds
// parallel-compact state after the generic CollectedHeap output.
void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  PSParallelCompact::print_on_error(st);
}
594
// Apply the closure to every GC worker thread.
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}
598
// Print the GC worker threads (used by thread-listing diagnostics).
void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  ParallelScavengeHeap::heap()->workers().print_worker_threads_on(st);
}
602
// At VM exit: print adaptive size policy output and log the accumulated
// young/old GC times (gc+heap+exit debug logging).
void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}
608
609 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
610 const PSYoungGen* const young = young_gen();
611 const MutableSpace* const eden = young->eden_space();
612 const MutableSpace* const from = young->from_space();
613 const PSOldGen* const old = old_gen();
614
615 return PreGenGCValues(young->used_in_bytes(),
616 young->capacity_in_bytes(),
617 eden->used_in_bytes(),
618 eden->capacity_in_bytes(),
619 from->used_in_bytes(),
620 from->capacity_in_bytes(),
621 old->used_in_bytes(),
622 old->capacity_in_bytes());
623 }
624
625 void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
626 const PSYoungGen* const young = young_gen();
|