src/share/vm/gc/cms/parNewGeneration.cpp

rev 11935 : [mq]: 8165292-gc-task-logging-spams-log


--- old/src/share/vm/gc/cms/parNewGeneration.cpp
+++ new/src/share/vm/gc/cms/parNewGeneration.cpp
@@ -882,40 +882,42 @@
 }
 
 void ParNewGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
   assert(full || size > 0, "otherwise we don't want to collect");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
   _gc_timer->register_gc_start();
 
   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need workgang for parallel work");
   uint active_workers =
        AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                                workers->active_workers(),
                                                Threads::number_of_non_daemon_threads());
   active_workers = workers->update_active_workers(active_workers);
+  log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
+
   _old_gen = gch->old_gen();
 
   // If the next generation is too full to accommodate worst-case promotion
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 
   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
   gch->trace_heap_before_gc(gc_tracer());
 
   init_assuming_no_promotion_failure();
 
   if (UseAdaptiveSizePolicy) {
     set_survivor_overflow(false);
     size_policy->minor_collection_begin();
   }
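Note on the added line: log_info(gc,task)(...) goes through HotSpot's unified logging (JDK 9+) under the gc and task tags, so the message is emitted once per young collection, at the call site, and only when that tag set is enabled. Judging from the bug synopsis (8165292-gc-task-logging-spams-log), logging here after update_active_workers() rather than on every worker-count recalculation is what keeps the log from being spammed. A minimal way to see the output; the application name, flag combination, and the sample values below are illustrative, not taken from this webrev:

    $ java -Xlog:gc+task=info -XX:+UseConcMarkSweepGC MyApp
    [0.412s][info][gc,task] Using 4 workers of 8 for evacuation

With default decorations, unified logging prefixes each line with the JVM uptime, the level, and the tag set, as sketched above.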

