520 size_policy->calculated_eden_size_in_bytes());
521 counters->update_old_promo_size(
522 size_policy->calculated_promo_size_in_bytes());
523 counters->update_old_capacity(old_gen->capacity_in_bytes());
524 counters->update_young_capacity(young_gen->capacity_in_bytes());
525 counters->update_survived(survived);
526 counters->update_promoted(promoted);
527 counters->update_survivor_overflowed(_survivor_overflow);
528 }
529
530 size_t survivor_limit =
531 size_policy->max_survivor_size(young_gen->max_size());
532 _tenuring_threshold =
533 size_policy->compute_survivor_space_size_and_threshold(
534 _survivor_overflow,
535 _tenuring_threshold,
536 survivor_limit);
537
538 if (PrintTenuringDistribution) {
539 gclog_or_tty->cr();
540 gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
541 size_policy->calculated_survivor_size_in_bytes(),
542 _tenuring_threshold, MaxTenuringThreshold);
543 }
544
545 if (UsePerfData) {
546 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
547 counters->update_tenuring_threshold(_tenuring_threshold);
548 counters->update_survivor_size_counters();
549 }
550
551 // Do call at minor collections?
552 // Don't check if the size_policy is ready at this
553 // level. Let the size_policy check that internally.
554 if (UseAdaptiveSizePolicy &&
555 UseAdaptiveGenerationSizePolicyAtMinorCollection &&
556 ((gc_cause != GCCause::_java_lang_system_gc) ||
557 UseAdaptiveSizePolicyWithSystemGC)) {
558
559 // Calculate optimal free space amounts
560 assert(young_gen->max_size() >
800 _consecutive_skipped_scavenges = 0;
801 } else {
802 _consecutive_skipped_scavenges++;
803 if (UsePerfData) {
804 counters->update_scavenge_skipped(promoted_too_large);
805 }
806 }
807 return result;
808 }
809
810 // Used to add tasks
811 GCTaskManager* const PSScavenge::gc_task_manager() {
812 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
813 "shouldn't return NULL");
814 return ParallelScavengeHeap::gc_task_manager();
815 }
816
817 void PSScavenge::initialize() {
818 // Arguments must have been parsed
819
820 if (AlwaysTenure) {
821 _tenuring_threshold = 0;
822 } else if (NeverTenure) {
823 _tenuring_threshold = markOopDesc::max_age + 1;
824 } else {
825 // We want to smooth out our startup times for the AdaptiveSizePolicy
826 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
827 MaxTenuringThreshold;
828 }
829
830 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
831 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
832
833 PSYoungGen* young_gen = heap->young_gen();
834 PSOldGen* old_gen = heap->old_gen();
835
836 // Set boundary between young_gen and old_gen
837 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
838 "old above young");
839 set_young_generation_boundary(young_gen->eden_space()->bottom());
840
841 // Initialize ref handling object for scavenging.
842 MemRegion mr = young_gen->reserved();
843
|
520 size_policy->calculated_eden_size_in_bytes());
521 counters->update_old_promo_size(
522 size_policy->calculated_promo_size_in_bytes());
523 counters->update_old_capacity(old_gen->capacity_in_bytes());
524 counters->update_young_capacity(young_gen->capacity_in_bytes());
525 counters->update_survived(survived);
526 counters->update_promoted(promoted);
527 counters->update_survivor_overflowed(_survivor_overflow);
528 }
529
530 size_t survivor_limit =
531 size_policy->max_survivor_size(young_gen->max_size());
532 _tenuring_threshold =
533 size_policy->compute_survivor_space_size_and_threshold(
534 _survivor_overflow,
535 _tenuring_threshold,
536 survivor_limit);
537
538 if (PrintTenuringDistribution) {
539 gclog_or_tty->cr();
540 gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold %u)",
541 size_policy->calculated_survivor_size_in_bytes(),
542 _tenuring_threshold, MaxTenuringThreshold);
543 }
544
545 if (UsePerfData) {
546 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
547 counters->update_tenuring_threshold(_tenuring_threshold);
548 counters->update_survivor_size_counters();
549 }
550
551 // Do call at minor collections?
552 // Don't check if the size_policy is ready at this
553 // level. Let the size_policy check that internally.
554 if (UseAdaptiveSizePolicy &&
555 UseAdaptiveGenerationSizePolicyAtMinorCollection &&
556 ((gc_cause != GCCause::_java_lang_system_gc) ||
557 UseAdaptiveSizePolicyWithSystemGC)) {
558
559 // Calculate optimal free space amounts
560 assert(young_gen->max_size() >
800 _consecutive_skipped_scavenges = 0;
801 } else {
802 _consecutive_skipped_scavenges++;
803 if (UsePerfData) {
804 counters->update_scavenge_skipped(promoted_too_large);
805 }
806 }
807 return result;
808 }
809
810 // Used to add tasks
811 GCTaskManager* const PSScavenge::gc_task_manager() {
812 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
813 "shouldn't return NULL");
814 return ParallelScavengeHeap::gc_task_manager();
815 }
816
817 void PSScavenge::initialize() {
818 // Arguments must have been parsed
819
820 if (AlwaysTenure || NeverTenure) {
821 _tenuring_threshold = MaxTenuringThreshold;
822 } else {
823 // We want to smooth out our startup times for the AdaptiveSizePolicy
824 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
825 MaxTenuringThreshold;
826 }
827
828 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
829 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
830
831 PSYoungGen* young_gen = heap->young_gen();
832 PSOldGen* old_gen = heap->old_gen();
833
834 // Set boundary between young_gen and old_gen
835 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
836 "old above young");
837 set_young_generation_boundary(young_gen->eden_space()->bottom());
838
839 // Initialize ref handling object for scavenging.
840 MemRegion mr = young_gen->reserved();
841
|