/*
 * NOTE(review): This chunk is NOT compilable C++ as it stands. It appears to
 * be a two-column diff dump ("old | new", separated by the lone '|' below) of
 * HotSpot's ParNewGeneration code, with each column's original source line
 * numbers (874-949) fused into the text and all formatting collapsed onto a
 * few physical lines. A mid-line "//" comment swallows the rest of its
 * physical line, so do not feed this to a compiler -- re-extract the real
 * source before editing.
 *
 * What both columns contain:
 *   - bool ParNewGeneration::_avoid_promotion_undo = false;  (static init)
 *   - ParNewGeneration::adjust_desired_tenuring_threshold()  (complete; sets
 *     _tenuring_threshold from age_table()->compute_tenuring_threshold(
 *     to()->capacity()/HeapWordSize))
 *   - ParNewGeneration::collect(full, clear_all_soft_refs, size, is_tlab)
 *     (TRUNCATED at original line 949 in both columns -- the remainder of the
 *     function lies outside this chunk)
 *
 * Observable difference between the columns, grounded in the text below:
 * in the left column, gc_tracer.report_gc_start(...) and
 * gch->trace_heap_before_gc(&gc_tracer) execute BEFORE the
 * !collection_attempt_is_safe() early return, so a bailed-out collection
 * emits a GC-start event with no visible matching end. In the right column
 * those two calls are moved to AFTER that early return (right column's lines
 * 927-928), and the "not a CMS generational heap" assert plus the
 * ParNewTracer declaration are hoisted ahead of the tracer reporting.
 * Presumably this mirrors an upstream OpenJDK GC-tracing fix ensuring
 * report_gc_start is only issued for collections that actually proceed --
 * TODO confirm against OpenJDK history before relying on this reading.
 */
874 } 875 876 877 bool ParNewGeneration::_avoid_promotion_undo = false; 878 879 void ParNewGeneration::adjust_desired_tenuring_threshold() { 880 // Set the desired survivor size to half the real survivor space 881 _tenuring_threshold = 882 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); 883 } 884 885 // A Generation that does parallel young-gen collection. 886 887 void ParNewGeneration::collect(bool full, 888 bool clear_all_soft_refs, 889 size_t size, 890 bool is_tlab) { 891 assert(full || size > 0, "otherwise we don't want to collect"); 892 893 GenCollectedHeap* gch = GenCollectedHeap::heap(); 894 895 _gc_timer->register_gc_start(os::elapsed_counter()); 896 ParNewTracer gc_tracer; 897 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); 898 899 gch->trace_heap_before_gc(&gc_tracer); 900 901 assert(gch->kind() == CollectedHeap::GenCollectedHeap, 902 "not a CMS generational heap"); 903 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); 904 FlexibleWorkGang* workers = gch->workers(); 905 assert(workers != NULL, "Need workgang for parallel work"); 906 int active_workers = 907 AdaptiveSizePolicy::calc_active_workers(workers->total_workers(), 908 workers->active_workers(), 909 Threads::number_of_non_daemon_threads()); 910 workers->set_active_workers(active_workers); 911 _next_gen = gch->next_gen(this); 912 assert(_next_gen != NULL, 913 "This must be the youngest gen, and not the only gen"); 914 assert(gch->n_gens() == 2, 915 "Par collection currently only works with single older gen."); 916 // Do we have to avoid promotion_undo? 917 if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { 918 set_avoid_promotion_undo(true); 919 } 920 921 // If the next generation is too full to accomodate worst-case promotion 922 // from this generation, pass on collection; let the next generation 923 // do it. 
924 if (!collection_attempt_is_safe()) { 925 gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one 926 return; 927 } 928 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); 929 930 init_assuming_no_promotion_failure(); 931 932 if (UseAdaptiveSizePolicy) { 933 set_survivor_overflow(false); 934 size_policy->minor_collection_begin(); 935 } 936 937 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); 938 // Capture heap used before collection (for printing). 939 size_t gch_prev_used = gch->used(); 940 941 SpecializationStats::clear(); 942 943 age_table()->clear(); 944 to()->clear(SpaceDecorator::Mangle); 945 946 gch->save_marks(); 947 assert(workers != NULL, "Need parallel worker threads."); 948 int n_workers = active_workers; 949 | 874 } 875 876 877 bool ParNewGeneration::_avoid_promotion_undo = false; 878 879 void ParNewGeneration::adjust_desired_tenuring_threshold() { 880 // Set the desired survivor size to half the real survivor space 881 _tenuring_threshold = 882 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); 883 } 884 885 // A Generation that does parallel young-gen collection. 
886 887 void ParNewGeneration::collect(bool full, 888 bool clear_all_soft_refs, 889 size_t size, 890 bool is_tlab) { 891 assert(full || size > 0, "otherwise we don't want to collect"); 892 893 GenCollectedHeap* gch = GenCollectedHeap::heap(); 894 ParNewTracer gc_tracer; 895 896 _gc_timer->register_gc_start(os::elapsed_counter()); 897 898 assert(gch->kind() == CollectedHeap::GenCollectedHeap, 899 "not a CMS generational heap"); 900 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); 901 FlexibleWorkGang* workers = gch->workers(); 902 assert(workers != NULL, "Need workgang for parallel work"); 903 int active_workers = 904 AdaptiveSizePolicy::calc_active_workers(workers->total_workers(), 905 workers->active_workers(), 906 Threads::number_of_non_daemon_threads()); 907 workers->set_active_workers(active_workers); 908 _next_gen = gch->next_gen(this); 909 assert(_next_gen != NULL, 910 "This must be the youngest gen, and not the only gen"); 911 assert(gch->n_gens() == 2, 912 "Par collection currently only works with single older gen."); 913 // Do we have to avoid promotion_undo? 914 if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { 915 set_avoid_promotion_undo(true); 916 } 917 918 // If the next generation is too full to accomodate worst-case promotion 919 // from this generation, pass on collection; let the next generation 920 // do it. 
921 if (!collection_attempt_is_safe()) { 922 gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one 923 return; 924 } 925 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); 926 927 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); 928 gch->trace_heap_before_gc(&gc_tracer); 929 930 init_assuming_no_promotion_failure(); 931 932 if (UseAdaptiveSizePolicy) { 933 set_survivor_overflow(false); 934 size_policy->minor_collection_begin(); 935 } 936 937 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); 938 // Capture heap used before collection (for printing). 939 size_t gch_prev_used = gch->used(); 940 941 SpecializationStats::clear(); 942 943 age_table()->clear(); 944 to()->clear(SpaceDecorator::Mangle); 945 946 gch->save_marks(); 947 assert(workers != NULL, "Need parallel worker threads."); 948 int n_workers = active_workers; 949 |