522 {
523 MutexLocker ml(Heap_lock);
524 // This value is guarded by the Heap_lock
525 gc_count = total_collections();
526 full_gc_count = total_full_collections();
527 }
528
529 if (GCLocker::should_discard(cause, gc_count)) {
530 return;
531 }
532
533 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
534 VMThread::execute(&op);
535 }
536
537 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
538 young_gen()->object_iterate(cl);
539 old_gen()->object_iterate(cl);
540 }
541
542
543 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
544 if (young_gen()->is_in_reserved(addr)) {
545 assert(young_gen()->is_in(addr),
546 "addr should be in allocated part of young gen");
547 // called from os::print_location by find or VMError
548 if (Debugging || VMError::fatal_error_in_progress()) return NULL;
549 Unimplemented();
550 } else if (old_gen()->is_in_reserved(addr)) {
551 assert(old_gen()->is_in(addr),
552 "addr should be in allocated part of old gen");
553 return old_gen()->start_array()->object_start((HeapWord*)addr);
554 }
555 return 0;
556 }
557
558 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
559 return block_start(addr) == addr;
560 }
561
562 void ParallelScavengeHeap::prepare_for_verify() {
592
593 void ParallelScavengeHeap::print_on(outputStream* st) const {
594 if (young_gen() != NULL) {
595 young_gen()->print_on(st);
596 }
597 if (old_gen() != NULL) {
598 old_gen()->print_on(st);
599 }
600 MetaspaceUtils::print_on(st);
601 }
602
603 void ParallelScavengeHeap::print_on_error(outputStream* st) const {
604 this->CollectedHeap::print_on_error(st);
605
606 st->cr();
607 PSParallelCompact::print_on_error(st);
608 }
609
610 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
611 ParallelScavengeHeap::heap()->workers().threads_do(tc);
612 }
613
614 void ParallelScavengeHeap::print_tracing_info() const {
615 AdaptiveSizePolicyOutput::print();
616 log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
617 log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
618 }
619
620 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
621 const PSYoungGen* const young = young_gen();
622 const MutableSpace* const eden = young->eden_space();
623 const MutableSpace* const from = young->from_space();
624 const PSOldGen* const old = old_gen();
625
626 return PreGenGCValues(young->used_in_bytes(),
627 young->capacity_in_bytes(),
628 eden->used_in_bytes(),
629 eden->capacity_in_bytes(),
630 from->used_in_bytes(),
631 from->capacity_in_bytes(),
|
522 {
523 MutexLocker ml(Heap_lock);
524 // This value is guarded by the Heap_lock
525 gc_count = total_collections();
526 full_gc_count = total_full_collections();
527 }
528
529 if (GCLocker::should_discard(cause, gc_count)) {
530 return;
531 }
532
533 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
534 VMThread::execute(&op);
535 }
536
537 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
538 young_gen()->object_iterate(cl);
539 old_gen()->object_iterate(cl);
540 }
541
542 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
543 if (young_gen()->is_in_reserved(addr)) {
544 assert(young_gen()->is_in(addr),
545 "addr should be in allocated part of young gen");
546 // called from os::print_location by find or VMError
547 if (Debugging || VMError::fatal_error_in_progress()) return NULL;
548 Unimplemented();
549 } else if (old_gen()->is_in_reserved(addr)) {
550 assert(old_gen()->is_in(addr),
551 "addr should be in allocated part of old gen");
552 return old_gen()->start_array()->object_start((HeapWord*)addr);
553 }
554 return 0;
555 }
556
557 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
558 return block_start(addr) == addr;
559 }
560
561 void ParallelScavengeHeap::prepare_for_verify() {
591
592 void ParallelScavengeHeap::print_on(outputStream* st) const {
593 if (young_gen() != NULL) {
594 young_gen()->print_on(st);
595 }
596 if (old_gen() != NULL) {
597 old_gen()->print_on(st);
598 }
599 MetaspaceUtils::print_on(st);
600 }
601
602 void ParallelScavengeHeap::print_on_error(outputStream* st) const {
603 this->CollectedHeap::print_on_error(st);
604
605 st->cr();
606 PSParallelCompact::print_on_error(st);
607 }
608
609 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
610 ParallelScavengeHeap::heap()->workers().threads_do(tc);
611 }
612
// Run the given gang task on this heap's worker threads.
void ParallelScavengeHeap::run_task(AbstractGangTask* task) {
  _workers.run_task(task);
}
616
617 void ParallelScavengeHeap::print_tracing_info() const {
618 AdaptiveSizePolicyOutput::print();
619 log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
620 log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
621 }
622
623 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
624 const PSYoungGen* const young = young_gen();
625 const MutableSpace* const eden = young->eden_space();
626 const MutableSpace* const from = young->from_space();
627 const PSOldGen* const old = old_gen();
628
629 return PreGenGCValues(young->used_in_bytes(),
630 young->capacity_in_bytes(),
631 eden->used_in_bytes(),
632 eden->capacity_in_bytes(),
633 from->used_in_bytes(),
634 from->capacity_in_bytes(),
|