// NOTE(review): This span is not compilable source — it looks like side-by-side
// diff residue: two line-number-prefixed copies of the same ParallelScavengeHeap
// member definitions, separated by '|'. The two copies appear identical except
// for where ParallelScavengeHeap::run_task() sits (right after object_iterate()
// in the first copy, after gc_threads_do() in the second) — presumably a commit
// that moved run_task(); confirm against the upstream
// parallelScavengeHeap.cpp history before acting on either copy.
// NOTE(review): Both copies are truncated: the body of millis_since_last_gc()
// is elided (the embedded numbering jumps 565->599 in the first copy and
// 561->595 in the second), get_pre_gc_values() is cut off mid-argument-list at
// "from->capacity_in_bytes()," and the leading "{ MutexLocker ml(Heap_lock);
// ... }" fragment belongs to a collection-request function whose signature is
// above this chunk. No definition here is complete enough to edit safely.
// TODO(review): recover the real file from the repository; the text below is
// left byte-for-byte untouched so the diff columns stay reconstructible.
522 { 523 MutexLocker ml(Heap_lock); 524 // This value is guarded by the Heap_lock 525 gc_count = total_collections(); 526 full_gc_count = total_full_collections(); 527 } 528 529 if (GCLocker::should_discard(cause, gc_count)) { 530 return; 531 } 532 533 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause); 534 VMThread::execute(&op); 535 } 536 537 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { 538 young_gen()->object_iterate(cl); 539 old_gen()->object_iterate(cl); 540 } 541 542 void ParallelScavengeHeap::run_task(AbstractGangTask* task) { 543 _workers.run_task(task); 544 } 545 546 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { 547 if (young_gen()->is_in_reserved(addr)) { 548 assert(young_gen()->is_in(addr), 549 "addr should be in allocated part of young gen"); 550 // called from os::print_location by find or VMError 551 if (Debugging || VMError::fatal_error_in_progress()) return NULL; 552 Unimplemented(); 553 } else if (old_gen()->is_in_reserved(addr)) { 554 assert(old_gen()->is_in(addr), 555 "addr should be in allocated part of old gen"); 556 return old_gen()->start_array()->object_start((HeapWord*)addr); 557 } 558 return 0; 559 } 560 561 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const { 562 return block_start(addr) == addr; 563 } 564 565 jlong ParallelScavengeHeap::millis_since_last_gc() { 599 600 void ParallelScavengeHeap::print_on(outputStream* st) const { 601 if (young_gen() != NULL) { 602 young_gen()->print_on(st); 603 } 604 if (old_gen() != NULL) { 605 old_gen()->print_on(st); 606 } 607 MetaspaceUtils::print_on(st); 608 } 609 610 void ParallelScavengeHeap::print_on_error(outputStream* st) const { 611 this->CollectedHeap::print_on_error(st); 612 613 st->cr(); 614 PSParallelCompact::print_on_error(st); 615 } 616 617 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const { 618 ParallelScavengeHeap::heap()->workers().threads_do(tc); 619 } 620 621 void 
ParallelScavengeHeap::print_tracing_info() const { 622 AdaptiveSizePolicyOutput::print(); 623 log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds()); 624 log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds()); 625 } 626 627 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const { 628 const PSYoungGen* const young = young_gen(); 629 const MutableSpace* const eden = young->eden_space(); 630 const MutableSpace* const from = young->from_space(); 631 const PSOldGen* const old = old_gen(); 632 633 return PreGenGCValues(young->used_in_bytes(), 634 young->capacity_in_bytes(), 635 eden->used_in_bytes(), 636 eden->capacity_in_bytes(), 637 from->used_in_bytes(), 638 from->capacity_in_bytes(), | 522 { 523 MutexLocker ml(Heap_lock); 524 // This value is guarded by the Heap_lock 525 gc_count = total_collections(); 526 full_gc_count = total_full_collections(); 527 } 528 529 if (GCLocker::should_discard(cause, gc_count)) { 530 return; 531 } 532 533 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause); 534 VMThread::execute(&op); 535 } 536 537 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { 538 young_gen()->object_iterate(cl); 539 old_gen()->object_iterate(cl); 540 } 541 542 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { 543 if (young_gen()->is_in_reserved(addr)) { 544 assert(young_gen()->is_in(addr), 545 "addr should be in allocated part of young gen"); 546 // called from os::print_location by find or VMError 547 if (Debugging || VMError::fatal_error_in_progress()) return NULL; 548 Unimplemented(); 549 } else if (old_gen()->is_in_reserved(addr)) { 550 assert(old_gen()->is_in(addr), 551 "addr should be in allocated part of old gen"); 552 return old_gen()->start_array()->object_start((HeapWord*)addr); 553 } 554 return 0; 555 } 556 557 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const 
{ 558 return block_start(addr) == addr; 559 } 560 561 jlong ParallelScavengeHeap::millis_since_last_gc() { 595 596 void ParallelScavengeHeap::print_on(outputStream* st) const { 597 if (young_gen() != NULL) { 598 young_gen()->print_on(st); 599 } 600 if (old_gen() != NULL) { 601 old_gen()->print_on(st); 602 } 603 MetaspaceUtils::print_on(st); 604 } 605 606 void ParallelScavengeHeap::print_on_error(outputStream* st) const { 607 this->CollectedHeap::print_on_error(st); 608 609 st->cr(); 610 PSParallelCompact::print_on_error(st); 611 } 612 613 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const { 614 ParallelScavengeHeap::heap()->workers().threads_do(tc); 615 } 616 617 void ParallelScavengeHeap::run_task(AbstractGangTask* task) { 618 _workers.run_task(task); 619 } 620 621 void ParallelScavengeHeap::print_tracing_info() const { 622 AdaptiveSizePolicyOutput::print(); 623 log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds()); 624 log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds()); 625 } 626 627 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const { 628 const PSYoungGen* const young = young_gen(); 629 const MutableSpace* const eden = young->eden_space(); 630 const MutableSpace* const from = young->from_space(); 631 const PSOldGen* const old = old_gen(); 632 633 return PreGenGCValues(young->used_in_bytes(), 634 young->capacity_in_bytes(), 635 eden->used_in_bytes(), 636 eden->capacity_in_bytes(), 637 from->used_in_bytes(), 638 from->capacity_in_bytes(), |