src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp

 520             size_policy->calculated_eden_size_in_bytes());
 521           counters->update_old_promo_size(
 522             size_policy->calculated_promo_size_in_bytes());
 523           counters->update_old_capacity(old_gen->capacity_in_bytes());
 524           counters->update_young_capacity(young_gen->capacity_in_bytes());
 525           counters->update_survived(survived);
 526           counters->update_promoted(promoted);
 527           counters->update_survivor_overflowed(_survivor_overflow);
 528         }
 529 
 530         size_t survivor_limit =
 531           size_policy->max_survivor_size(young_gen->max_size());
 532         _tenuring_threshold =
 533           size_policy->compute_survivor_space_size_and_threshold(
 534                                                            _survivor_overflow,
 535                                                            _tenuring_threshold,
 536                                                            survivor_limit);
 537 
 538        if (PrintTenuringDistribution) {
 539          gclog_or_tty->cr();
 540          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
 541                                 size_policy->calculated_survivor_size_in_bytes(),
 542                                 _tenuring_threshold, MaxTenuringThreshold);
 543        }
 544 
 545         if (UsePerfData) {
 546           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
 547           counters->update_tenuring_threshold(_tenuring_threshold);
 548           counters->update_survivor_size_counters();
 549         }
 550 
 551         // Do call at minor collections?
 552         // Don't check if the size_policy is ready at this
 553         // level.  Let the size_policy check that internally.
 554         if (UseAdaptiveSizePolicy &&
 555             UseAdaptiveGenerationSizePolicyAtMinorCollection &&
 556             ((gc_cause != GCCause::_java_lang_system_gc) ||
 557               UseAdaptiveSizePolicyWithSystemGC)) {
 558 
 559           // Calculate optimal free space amounts
 560           assert(young_gen->max_size() >


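Lines 530-536 above ask the size policy for a survivor-space limit and then recompute the tenuring threshold from the previous scavenge's survivor overflow. As a rough, standalone sketch of that idea (this is not the PSAdaptiveSizePolicy implementation; the helper name, the age-table array, and the desired survivor size are assumed inputs):

    #include <cstdio>
    #include <cstddef>

    // Illustrative sketch only (not PSAdaptiveSizePolicy): pick the smallest age
    // whose cumulative survivor bytes exceed the desired survivor size, capped at
    // the maximum threshold, and back off by one after a survivor-space overflow.
    // 'cumulative_bytes_by_age[a]' is a hypothetical sum of bytes with age <= a.
    static unsigned compute_threshold_sketch(bool survivor_overflow,
                                             unsigned max_threshold,
                                             std::size_t desired_survivor_size,
                                             const std::size_t* cumulative_bytes_by_age) {
      unsigned threshold = max_threshold;
      for (unsigned age = 1; age <= max_threshold; age++) {
        if (cumulative_bytes_by_age[age] > desired_survivor_size) {
          threshold = age;       // anything older than this age gets promoted
          break;
        }
      }
      if (survivor_overflow && threshold > 1) {
        threshold--;             // be more aggressive after an overflow
      }
      return threshold;
    }

    int main() {
      // Cumulative bytes for ages 0..5 of a made-up age table.
      const std::size_t bytes[] = { 0, 4u << 20, 7u << 20, 9u << 20, 10u << 20, 11u << 20 };
      std::printf("new threshold = %u\n",
                  compute_threshold_sketch(false, 5, 8u << 20, bytes));
      return 0;
    }

In the file itself the same call also settles the survivor space size, and the UsePerfData block at lines 545-549 publishes the resulting threshold and survivor sizes to the policy counters.
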
 800     _consecutive_skipped_scavenges = 0;
 801   } else {
 802     _consecutive_skipped_scavenges++;
 803     if (UsePerfData) {
 804       counters->update_scavenge_skipped(promoted_too_large);
 805     }
 806   }
 807   return result;
 808 }
 809 
 810   // Used to add tasks
 811 GCTaskManager* const PSScavenge::gc_task_manager() {
 812   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
 813    "shouldn't return NULL");
 814   return ParallelScavengeHeap::gc_task_manager();
 815 }
 816 
 817 void PSScavenge::initialize() {
 818   // Arguments must have been parsed
 819 
 820   if (AlwaysTenure) {
 821     _tenuring_threshold = 0;
 822   } else if (NeverTenure) {
 823     _tenuring_threshold = markOopDesc::max_age + 1;
 824   } else {
 825     // We want to smooth out our startup times for the AdaptiveSizePolicy
 826     _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
 827                                                     MaxTenuringThreshold;
 828   }
 829 
 830   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 831   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 832 
 833   PSYoungGen* young_gen = heap->young_gen();
 834   PSOldGen* old_gen = heap->old_gen();
 835 
 836   // Set boundary between young_gen and old_gen
 837   assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
 838          "old above young");
 839   set_young_generation_boundary(young_gen->eden_space()->bottom());
 840 
 841   // Initialize ref handling object for scavenging.
 842   MemRegion mr = young_gen->reserved();
 843 
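
In this version of PSScavenge::initialize(), the tenuring flags map to a starting threshold directly: AlwaysTenure forces 0 (every live object is promoted at its first scavenge), NeverTenure forces markOopDesc::max_age + 1 (an age no object can ever reach), and otherwise the adaptive policy starts from InitialTenuringThreshold to smooth out startup. A minimal compilable sketch of that mapping; the helper name and the max_age constant of 15 (mirroring markOopDesc::max_age) are assumptions:

    #include <cstdio>

    // Standalone sketch of the flag-to-threshold mapping in the
    // PSScavenge::initialize() shown above.
    static const unsigned max_age = 15;

    static unsigned initial_tenuring_threshold(bool always_tenure,
                                               bool never_tenure,
                                               bool use_adaptive_size_policy,
                                               unsigned initial_threshold,
                                               unsigned max_threshold) {
      if (always_tenure) return 0;              // promote on the first scavenge
      if (never_tenure)  return max_age + 1;    // an age no object can reach
      // Smooth out startup under the adaptive policy by starting low.
      return use_adaptive_size_policy ? initial_threshold : max_threshold;
    }

    int main() {
      std::printf("AlwaysTenure -> %u\n", initial_tenuring_threshold(true,  false, false, 7, 15));
      std::printf("NeverTenure  -> %u\n", initial_tenuring_threshold(false, true,  false, 7, 15));
      std::printf("Adaptive     -> %u\n", initial_tenuring_threshold(false, false, true,  7, 15));
      return 0;
    }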


 520             size_policy->calculated_eden_size_in_bytes());
 521           counters->update_old_promo_size(
 522             size_policy->calculated_promo_size_in_bytes());
 523           counters->update_old_capacity(old_gen->capacity_in_bytes());
 524           counters->update_young_capacity(young_gen->capacity_in_bytes());
 525           counters->update_survived(survived);
 526           counters->update_promoted(promoted);
 527           counters->update_survivor_overflowed(_survivor_overflow);
 528         }
 529 
 530         size_t survivor_limit =
 531           size_policy->max_survivor_size(young_gen->max_size());
 532         _tenuring_threshold =
 533           size_policy->compute_survivor_space_size_and_threshold(
 534                                                            _survivor_overflow,
 535                                                            _tenuring_threshold,
 536                                                            survivor_limit);
 537 
 538        if (PrintTenuringDistribution) {
 539          gclog_or_tty->cr();
 540          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold "
 541                                 UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
 542                                 size_policy->calculated_survivor_size_in_bytes(),
 543                                 _tenuring_threshold, MaxTenuringThreshold);
 544        }
 545 
 546         if (UsePerfData) {
 547           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
 548           counters->update_tenuring_threshold(_tenuring_threshold);
 549           counters->update_survivor_size_counters();
 550         }
 551 
 552         // Do call at minor collections?
 553         // Don't check if the size_policy is ready at this
 554         // level.  Let the size_policy check that internally.
 555         if (UseAdaptiveSizePolicy &&
 556             UseAdaptiveGenerationSizePolicyAtMinorCollection &&
 557             ((gc_cause != GCCause::_java_lang_system_gc) ||
 558               UseAdaptiveSizePolicyWithSystemGC)) {
 559 
 560           // Calculate optimal free space amounts
 561           assert(young_gen->max_size() >


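The updated PrintTenuringDistribution message prints the threshold fields with UINTX_FORMAT instead of %u, matching the uintx-sized values passed to print_cr. HotSpot defines SIZE_FORMAT and UINTX_FORMAT per platform; the sketch below only illustrates the general format-macro technique with standard <cinttypes> macros and made-up *_SKETCH names:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Illustration of the format-macro idea behind SIZE_FORMAT / UINTX_FORMAT:
    // widen the values to a fixed 64-bit type so one format string works on
    // 32- and 64-bit platforms alike.
    #define SIZE_FORMAT_SKETCH  "%" PRIu64
    #define UINTX_FORMAT_SKETCH "%" PRIu64

    int main() {
      std::uint64_t desired_survivor_size = 8u * 1024 * 1024;  // example value
      std::uint64_t tenuring_threshold    = 7;
      std::uint64_t max_threshold         = 15;
      std::printf("Desired survivor size " SIZE_FORMAT_SKETCH " bytes, new threshold "
                  UINTX_FORMAT_SKETCH " (max threshold " UINTX_FORMAT_SKETCH ")\n",
                  desired_survivor_size, tenuring_threshold, max_threshold);
      return 0;
    }
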
 801     _consecutive_skipped_scavenges = 0;
 802   } else {
 803     _consecutive_skipped_scavenges++;
 804     if (UsePerfData) {
 805       counters->update_scavenge_skipped(promoted_too_large);
 806     }
 807   }
 808   return result;
 809 }
 810 
 811   // Used to add tasks
 812 GCTaskManager* const PSScavenge::gc_task_manager() {
 813   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
 814    "shouldn't return NULL");
 815   return ParallelScavengeHeap::gc_task_manager();
 816 }
 817 
 818 void PSScavenge::initialize() {
 819   // Arguments must have been parsed
 820 
 821   if (AlwaysTenure || NeverTenure) {
 822     assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
 823         err_msg("MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold));
 824     _tenuring_threshold = MaxTenuringThreshold;
 825   } else {
 826     // We want to smooth out our startup times for the AdaptiveSizePolicy
 827     _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
 828                                                     MaxTenuringThreshold;
 829   }
 830 
 831   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 832   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 833 
 834   PSYoungGen* young_gen = heap->young_gen();
 835   PSOldGen* old_gen = heap->old_gen();
 836 
 837   // Set boundary between young_gen and old_gen
 838   assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
 839          "old above young");
 840   set_young_generation_boundary(young_gen->eden_space()->bottom());
 841 
 842   // Initialize ref handling object for scavenging.
 843   MemRegion mr = young_gen->reserved();
 844
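
The reworked initialize() no longer special-cases AlwaysTenure and NeverTenure separately: argument processing is expected to have pinned MaxTenuringThreshold to 0 or markOopDesc::max_age + 1 beforehand, so the scavenger only asserts that invariant and takes the threshold from the flag. A simplified sketch of the new mapping; the names and the max_age value of 15 are assumptions rather than the HotSpot declarations:

    #include <cassert>
    #include <cstdio>

    // Sketch of the reworked mapping: with AlwaysTenure or NeverTenure set,
    // MaxTenuringThreshold is expected to be consistent already, so the
    // threshold is simply taken from it. 'max_age' mirrors markOopDesc::max_age.
    static const unsigned max_age = 15;

    static unsigned initial_threshold_v2(bool always_or_never_tenure,
                                         bool use_adaptive_size_policy,
                                         unsigned initial_threshold,
                                         unsigned max_threshold) {
      if (always_or_never_tenure) {
        // Mirrors the assert at line 822: the flag combination must already be
        // consistent by the time the scavenger is initialized.
        assert(max_threshold == 0 || max_threshold == max_age + 1);
        return max_threshold;
      }
      // Unchanged path: start low under the adaptive policy to smooth startup.
      return use_adaptive_size_policy ? initial_threshold : max_threshold;
    }

    int main() {
      std::printf("NeverTenure  -> %u\n", initial_threshold_v2(true,  false, 7, max_age + 1));
      std::printf("AlwaysTenure -> %u\n", initial_threshold_v2(true,  false, 7, 0));
      std::printf("Adaptive     -> %u\n", initial_threshold_v2(false, true,  7, 15));
      return 0;
    }

Centralizing the check in argument processing means a run with -XX:+AlwaysTenure or -XX:+NeverTenure sees a MaxTenuringThreshold that is consistent everywhere it is read, not just in this function.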