src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp

Excerpt of a side-by-side revision view: the fragments below show the same
regions of this file before and after a change (note the differing
PrintTenuringDistribution message text and the AlwaysTenure/NeverTenure
handling in PSScavenge::initialize()).




 521             size_policy->calculated_eden_size_in_bytes());
 522           counters->update_old_promo_size(
 523             size_policy->calculated_promo_size_in_bytes());
 524           counters->update_old_capacity(old_gen->capacity_in_bytes());
 525           counters->update_young_capacity(young_gen->capacity_in_bytes());
 526           counters->update_survived(survived);
 527           counters->update_promoted(promoted);
 528           counters->update_survivor_overflowed(_survivor_overflow);
 529         }
 530 
 531         size_t survivor_limit =
 532           size_policy->max_survivor_size(young_gen->max_size());
 533         _tenuring_threshold =
 534           size_policy->compute_survivor_space_size_and_threshold(
 535                                                            _survivor_overflow,
 536                                                            _tenuring_threshold,
 537                                                            survivor_limit);
 538 
 539        if (PrintTenuringDistribution) {
 540          gclog_or_tty->cr();
 541          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
 542                                 size_policy->calculated_survivor_size_in_bytes(),
 543                                 _tenuring_threshold, MaxTenuringThreshold);
 544        }
 545 
 546         if (UsePerfData) {
 547           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
 548           counters->update_tenuring_threshold(_tenuring_threshold);
 549           counters->update_survivor_size_counters();
 550         }
 551 
 552         // Do call at minor collections?
 553         // Don't check if the size_policy is ready at this
 554         // level.  Let the size_policy check that internally.
 555         if (UseAdaptiveSizePolicy &&
 556             UseAdaptiveGenerationSizePolicyAtMinorCollection &&
 557             ((gc_cause != GCCause::_java_lang_system_gc) ||
 558               UseAdaptiveSizePolicyWithSystemGC)) {
 559 
 560           // Calculate optimal free space amounts
 561           assert(young_gen->max_size() >


 801     _consecutive_skipped_scavenges = 0;
 802   } else {
 803     _consecutive_skipped_scavenges++;
 804     if (UsePerfData) {
 805       counters->update_scavenge_skipped(promoted_too_large);
 806     }
 807   }
 808   return result;
 809 }
 810 
 811   // Used to add tasks
// Accessor for the heap-wide GCTaskManager that parallel scavenge work is
// queued on. Purely a delegation to ParallelScavengeHeap's static instance;
// the assert guards against calls made before the heap (and therefore its
// task manager) has been initialized.
// NOTE(review): the top-level `const` on the returned pointer value has no
// effect on callers of a by-value return — presumably kept to match the
// declaration in the header; confirm before changing.
 812 GCTaskManager* const PSScavenge::gc_task_manager() {
 813   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
 814    "shouldn't return NULL");
 815   return ParallelScavengeHeap::gc_task_manager();
 816 }
 817 
 818 void PSScavenge::initialize() {
 819   // Arguments must have been parsed
 820 
 821   if (AlwaysTenure) {
 822     _tenuring_threshold = 0;
 823   } else if (NeverTenure) {
 824     _tenuring_threshold = markOopDesc::max_age + 1;
 825   } else {
 826     // We want to smooth out our startup times for the AdaptiveSizePolicy
 827     _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
 828                                                     MaxTenuringThreshold;
 829   }
 830 
 831   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 832   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 833 
 834   PSYoungGen* young_gen = heap->young_gen();
 835   PSOldGen* old_gen = heap->old_gen();
 836 
 837   // Set boundary between young_gen and old_gen
 838   assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
 839          "old above young");
 840   set_young_generation_boundary(young_gen->eden_space()->bottom());
 841 
 842   // Initialize ref handling object for scavenging.
 843   MemRegion mr = young_gen->reserved();
 844 


 521             size_policy->calculated_eden_size_in_bytes());
 522           counters->update_old_promo_size(
 523             size_policy->calculated_promo_size_in_bytes());
 524           counters->update_old_capacity(old_gen->capacity_in_bytes());
 525           counters->update_young_capacity(young_gen->capacity_in_bytes());
 526           counters->update_survived(survived);
 527           counters->update_promoted(promoted);
 528           counters->update_survivor_overflowed(_survivor_overflow);
 529         }
 530 
 531         size_t survivor_limit =
 532           size_policy->max_survivor_size(young_gen->max_size());
 533         _tenuring_threshold =
 534           size_policy->compute_survivor_space_size_and_threshold(
 535                                                            _survivor_overflow,
 536                                                            _tenuring_threshold,
 537                                                            survivor_limit);
 538 
 539        if (PrintTenuringDistribution) {
 540          gclog_or_tty->cr();
 541          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold %u)",
 542                                 size_policy->calculated_survivor_size_in_bytes(),
 543                                 _tenuring_threshold, MaxTenuringThreshold);
 544        }
 545 
 546         if (UsePerfData) {
 547           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
 548           counters->update_tenuring_threshold(_tenuring_threshold);
 549           counters->update_survivor_size_counters();
 550         }
 551 
 552         // Do call at minor collections?
 553         // Don't check if the size_policy is ready at this
 554         // level.  Let the size_policy check that internally.
 555         if (UseAdaptiveSizePolicy &&
 556             UseAdaptiveGenerationSizePolicyAtMinorCollection &&
 557             ((gc_cause != GCCause::_java_lang_system_gc) ||
 558               UseAdaptiveSizePolicyWithSystemGC)) {
 559 
 560           // Calculate optimal free space amounts
 561           assert(young_gen->max_size() >


 801     _consecutive_skipped_scavenges = 0;
 802   } else {
 803     _consecutive_skipped_scavenges++;
 804     if (UsePerfData) {
 805       counters->update_scavenge_skipped(promoted_too_large);
 806     }
 807   }
 808   return result;
 809 }
 810 
 811   // Used to add tasks
// Accessor for the heap-wide GCTaskManager that parallel scavenge work is
// queued on. Purely a delegation to ParallelScavengeHeap's static instance;
// the assert guards against calls made before the heap (and therefore its
// task manager) has been initialized.
// NOTE(review): the top-level `const` on the returned pointer value has no
// effect on callers of a by-value return — presumably kept to match the
// declaration in the header; confirm before changing.
 812 GCTaskManager* const PSScavenge::gc_task_manager() {
 813   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
 814    "shouldn't return NULL");
 815   return ParallelScavengeHeap::gc_task_manager();
 816 }
 817 
 818 void PSScavenge::initialize() {
 819   // Arguments must have been parsed
 820 
 821   if (AlwaysTenure || NeverTenure) {
 822     _tenuring_threshold = MaxTenuringThreshold;


 823   } else {
 824     // We want to smooth out our startup times for the AdaptiveSizePolicy
 825     _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
 826                                                     MaxTenuringThreshold;
 827   }
 828 
 829   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 830   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 831 
 832   PSYoungGen* young_gen = heap->young_gen();
 833   PSOldGen* old_gen = heap->old_gen();
 834 
 835   // Set boundary between young_gen and old_gen
 836   assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
 837          "old above young");
 838   set_young_generation_boundary(young_gen->eden_space()->bottom());
 839 
 840   // Initialize ref handling object for scavenging.
 841   MemRegion mr = young_gen->reserved();
 842