14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "gc/parallel/adjoiningGenerations.hpp"
28 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
29 #include "gc/parallel/gcTaskManager.hpp"
30 #include "gc/parallel/generationSizer.hpp"
31 #include "gc/parallel/objectStartArray.inline.hpp"
32 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
33 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
34 #include "gc/parallel/psMarkSweep.hpp"
35 #include "gc/parallel/psMemoryPool.hpp"
36 #include "gc/parallel/psParallelCompact.inline.hpp"
37 #include "gc/parallel/psPromotionManager.hpp"
38 #include "gc/parallel/psScavenge.hpp"
39 #include "gc/parallel/vmPSOperations.hpp"
40 #include "gc/shared/gcHeapSummary.hpp"
41 #include "gc/shared/gcLocker.hpp"
42 #include "gc/shared/gcWhen.hpp"
43 #include "logging/log.hpp"
44 #include "memory/metaspaceCounters.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "runtime/handles.inline.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/vmThread.hpp"
49 #include "services/memoryManager.hpp"
50 #include "services/memTracker.hpp"
51 #include "utilities/macros.hpp"
52 #include "utilities/vmError.hpp"
53
54 PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
139
140 _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
141 _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");
142
143 _old_manager->add_pool(_eden_pool);
144 _old_manager->add_pool(_survivor_pool);
145 _old_manager->add_pool(_old_pool);
146
147 _young_manager->add_pool(_eden_pool);
148 _young_manager->add_pool(_survivor_pool);
149
150 }
151
// Second-phase initialization, run after the heap itself is set up.
// Initializes the scavenger, the selected full-GC implementation, and
// the promotion managers. Order matters: the tenuring threshold must be
// initialized (via PSScavenge) before promotion managers are created.
void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  // Pick the full-GC implementation: parallel compaction or serial mark-sweep.
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}
163
// Refresh the performance counters (jstat/perfdata) for both generations
// and for metaspace / compressed class space.
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}
170
171 size_t ParallelScavengeHeap::capacity() const {
172 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
173 return value;
174 }
175
176 size_t ParallelScavengeHeap::used() const {
177 size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
178 return value;
179 }
390 // number of times before doing a GC.
391 if (_death_march_count > 0) {
392 if (_death_march_count < 64) {
393 ++_death_march_count;
394 return old_gen()->allocate(size);
395 } else {
396 _death_march_count = 0;
397 }
398 }
399 return NULL;
400 }
401
402 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
403 if (UseParallelOldGC) {
404 // The do_full_collection() parameter clear_all_soft_refs
405 // is interpreted here as maximum_compaction which will
406 // cause SoftRefs to be cleared.
407 bool maximum_compaction = clear_all_soft_refs;
408 PSParallelCompact::invoke(maximum_compaction);
409 } else {
410 PSMarkSweep::invoke(clear_all_soft_refs);
411 }
412 }
413
414 // Failed allocation policy. Must be called from the VM thread, and
415 // only at a safepoint! Note that this method has policy for allocation
416 // flow, and NOT collection policy. So we do not check for gc collection
417 // time over limit here, that is the responsibility of the heap specific
418 // collection methods. This method decides where to attempt allocations,
419 // and when to attempt collections, but no collection specific policy.
420 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
421 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
422 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
423 assert(!is_gc_active(), "not reentrant");
424 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
425
426 // We assume that allocation in eden will fail unless we collect.
427
428 // First level allocation failure, scavenge and allocate in young gen.
429 GCCauseSetter gccs(this, GCCause::_allocation_failure);
430 const bool invoked_full_gc = PSScavenge::invoke();
524 Unimplemented();
525 } else if (old_gen()->is_in_reserved(addr)) {
526 assert(old_gen()->is_in(addr),
527 "addr should be in allocated part of old gen");
528 return old_gen()->start_array()->object_start((HeapWord*)addr);
529 }
530 return 0;
531 }
532
533 size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
534 return oop(addr)->size();
535 }
536
537 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
538 return block_start(addr) == addr;
539 }
540
541 jlong ParallelScavengeHeap::millis_since_last_gc() {
542 return UseParallelOldGC ?
543 PSParallelCompact::millis_since_last_gc() :
544 PSMarkSweep::millis_since_last_gc();
545 }
546
// Make the heap parsable before verification.
void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false); // no need to retire TLABs for verification
}
550
551 PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
552 PSOldGen* old = old_gen();
553 HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
554 VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
555 SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
556
557 PSYoungGen* young = young_gen();
558 VirtualSpaceSummary young_summary(young->reserved().start(),
559 (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
560
561 MutableSpace* eden = young_gen()->eden_space();
562 SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
563
564 MutableSpace* from = young_gen()->from_space();
581 this->CollectedHeap::print_on_error(st);
582
583 if (UseParallelOldGC) {
584 st->cr();
585 PSParallelCompact::print_on_error(st);
586 }
587 }
588
589 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
590 PSScavenge::gc_task_manager()->threads_do(tc);
591 }
592
// Print the GC worker threads to the given stream.
void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}
596
597 void ParallelScavengeHeap::print_tracing_info() const {
598 AdaptiveSizePolicyOutput::print();
599 log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
600 log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
601 UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
602 }
603
604
// Verify both generations. The VerifyOption is ignored by this collector.
void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    // NOTE(review): the label says "Eden" but the entire young generation
    // (eden and both survivor spaces) is verified here.
    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}
615
616 void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
617 const PSHeapSummary& heap_summary = create_ps_heap_summary();
618 gc_tracer->report_gc_heap_summary(when, heap_summary);
619
620 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
621 gc_tracer->report_metaspace_summary(when, metaspace_summary);
|
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "gc/parallel/adjoiningGenerations.hpp"
28 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
29 #include "gc/parallel/gcTaskManager.hpp"
30 #include "gc/parallel/generationSizer.hpp"
31 #include "gc/parallel/objectStartArray.inline.hpp"
32 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
33 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
34 #include "gc/parallel/psMarkSweepProxy.hpp"
35 #include "gc/parallel/psMemoryPool.hpp"
36 #include "gc/parallel/psParallelCompact.inline.hpp"
37 #include "gc/parallel/psPromotionManager.hpp"
38 #include "gc/parallel/psScavenge.hpp"
39 #include "gc/parallel/vmPSOperations.hpp"
40 #include "gc/shared/gcHeapSummary.hpp"
41 #include "gc/shared/gcLocker.hpp"
42 #include "gc/shared/gcWhen.hpp"
43 #include "logging/log.hpp"
44 #include "memory/metaspaceCounters.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "runtime/handles.inline.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/vmThread.hpp"
49 #include "services/memoryManager.hpp"
50 #include "services/memTracker.hpp"
51 #include "utilities/macros.hpp"
52 #include "utilities/vmError.hpp"
53
54 PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
139
140 _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
141 _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");
142
143 _old_manager->add_pool(_eden_pool);
144 _old_manager->add_pool(_survivor_pool);
145 _old_manager->add_pool(_old_pool);
146
147 _young_manager->add_pool(_eden_pool);
148 _young_manager->add_pool(_survivor_pool);
149
150 }
151
// Second-phase initialization, run after the heap itself is set up.
// Initializes the scavenger, the selected full-GC implementation, and
// the promotion managers. Order matters: the tenuring threshold must be
// initialized (via PSScavenge) before promotion managers are created.
void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  // Pick the full-GC implementation: parallel compaction or serial mark-sweep.
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweepProxy::initialize();
  }
  PSPromotionManager::initialize();
}
163
// Refresh the performance counters (jstat/perfdata) for both generations
// and for metaspace / compressed class space.
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}
170
171 size_t ParallelScavengeHeap::capacity() const {
172 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
173 return value;
174 }
175
176 size_t ParallelScavengeHeap::used() const {
177 size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
178 return value;
179 }
390 // number of times before doing a GC.
391 if (_death_march_count > 0) {
392 if (_death_march_count < 64) {
393 ++_death_march_count;
394 return old_gen()->allocate(size);
395 } else {
396 _death_march_count = 0;
397 }
398 }
399 return NULL;
400 }
401
402 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
403 if (UseParallelOldGC) {
404 // The do_full_collection() parameter clear_all_soft_refs
405 // is interpreted here as maximum_compaction which will
406 // cause SoftRefs to be cleared.
407 bool maximum_compaction = clear_all_soft_refs;
408 PSParallelCompact::invoke(maximum_compaction);
409 } else {
410 PSMarkSweepProxy::invoke(clear_all_soft_refs);
411 }
412 }
413
414 // Failed allocation policy. Must be called from the VM thread, and
415 // only at a safepoint! Note that this method has policy for allocation
416 // flow, and NOT collection policy. So we do not check for gc collection
417 // time over limit here, that is the responsibility of the heap specific
418 // collection methods. This method decides where to attempt allocations,
419 // and when to attempt collections, but no collection specific policy.
420 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
421 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
422 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
423 assert(!is_gc_active(), "not reentrant");
424 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
425
426 // We assume that allocation in eden will fail unless we collect.
427
428 // First level allocation failure, scavenge and allocate in young gen.
429 GCCauseSetter gccs(this, GCCause::_allocation_failure);
430 const bool invoked_full_gc = PSScavenge::invoke();
524 Unimplemented();
525 } else if (old_gen()->is_in_reserved(addr)) {
526 assert(old_gen()->is_in(addr),
527 "addr should be in allocated part of old gen");
528 return old_gen()->start_array()->object_start((HeapWord*)addr);
529 }
530 return 0;
531 }
532
533 size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
534 return oop(addr)->size();
535 }
536
537 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
538 return block_start(addr) == addr;
539 }
540
541 jlong ParallelScavengeHeap::millis_since_last_gc() {
542 return UseParallelOldGC ?
543 PSParallelCompact::millis_since_last_gc() :
544 PSMarkSweepProxy::millis_since_last_gc();
545 }
546
// Make the heap parsable before verification.
void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false); // no need to retire TLABs for verification
}
550
551 PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
552 PSOldGen* old = old_gen();
553 HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
554 VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
555 SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
556
557 PSYoungGen* young = young_gen();
558 VirtualSpaceSummary young_summary(young->reserved().start(),
559 (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
560
561 MutableSpace* eden = young_gen()->eden_space();
562 SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
563
564 MutableSpace* from = young_gen()->from_space();
581 this->CollectedHeap::print_on_error(st);
582
583 if (UseParallelOldGC) {
584 st->cr();
585 PSParallelCompact::print_on_error(st);
586 }
587 }
588
589 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
590 PSScavenge::gc_task_manager()->threads_do(tc);
591 }
592
// Print the GC worker threads to the given stream.
void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}
596
597 void ParallelScavengeHeap::print_tracing_info() const {
598 AdaptiveSizePolicyOutput::print();
599 log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
600 log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
601 UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
602 }
603
604
// Verify both generations. The VerifyOption is ignored by this collector.
void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    // NOTE(review): the label says "Eden" but the entire young generation
    // (eden and both survivor spaces) is verified here.
    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}
615
616 void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
617 const PSHeapSummary& heap_summary = create_ps_heap_summary();
618 gc_tracer->report_gc_heap_summary(when, heap_summary);
619
620 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
621 gc_tracer->report_metaspace_summary(when, metaspace_summary);
|