
src/hotspot/share/gc/cms/parNewGeneration.cpp





 872   // Trace promotion failure in the parallel GC threads
 873   thread_state_set.trace_promotion_failed(gc_tracer());
 874   // Single threaded code may have reported promotion failure to the global state
 875   if (_promotion_failed_info.has_failed()) {
 876     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 877   }
 878   // Reset the PromotionFailureALot counters.
 879   NOT_PRODUCT(gch->reset_promotion_should_fail();)
 880 }
 881 
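
The fragment above handles promotion failure from both directions: each parallel GC thread traces its own record via thread_state_set, while single-threaded code paths fold failures into the global _promotion_failed_info, which is reported only if it actually recorded one. A minimal model of that guard-then-report idiom follows; the names are purely illustrative, not HotSpot's:

#include <cstdio>

// Hypothetical, minimal model of the idiom above: a global failure record
// (filled by single-threaded paths) is reported only if it actually saw a
// failure. None of these names come from the HotSpot sources.
struct PromotionFailedInfo {
  unsigned failed_count = 0;
  bool has_failed() const { return failed_count > 0; }
};

static void report_promotion_failed(const PromotionFailedInfo& info) {
  printf("promotion failed %u time(s)\n", info.failed_count);
}

int main() {
  PromotionFailedInfo global;      // filled by single-threaded code paths
  global.failed_count = 2;
  if (global.has_failed()) {       // mirrors the has_failed() guard above
    report_promotion_failed(global);
  }
  return 0;
}
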
 882 void ParNewGeneration::collect(bool   full,
 883                                bool   clear_all_soft_refs,
 884                                size_t size,
 885                                bool   is_tlab) {
 886   assert(full || size > 0, "otherwise we don't want to collect");
 887 
 888   CMSHeap* gch = CMSHeap::heap();
 889 
 890   _gc_timer->register_gc_start();
 891 
- 892   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
+ 892   AdaptiveSizePolicy* size_policy = gch->size_policy();
 893   WorkGang* workers = gch->workers();
 894   assert(workers != NULL, "Need workgang for parallel work");
 895   uint active_workers =
 896        AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 897                                                workers->active_workers(),
 898                                                Threads::number_of_non_daemon_threads());
 899   active_workers = workers->update_active_workers(active_workers);
 900   log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
 901 
 902   _old_gen = gch->old_gen();
 903 
 904   // If the next generation is too full to accommodate worst-case promotion
 905   // from this generation, pass on collection; let the next generation
 906   // do it.
 907   if (!collection_attempt_is_safe()) {
 908     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 909     return;
 910   }
 911   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 912 
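
The worker-selection step above asks AdaptiveSizePolicy::calc_active_workers() to size the gang from its total capacity, its current activity, and the number of non-daemon Java threads, then commits the result with update_active_workers(). A minimal sketch of such a clamping policy, assuming a simplified heuristic; the function below is illustrative, not HotSpot's actual implementation:

#include <algorithm>
#include <cstdio>

// Hypothetical simplification of calc_active_workers(): scale toward the
// application's thread count, but stay within [1, total_workers].
static unsigned calc_active_workers_sketch(unsigned total_workers,
                                           unsigned current_active,
                                           unsigned non_daemon_threads) {
  unsigned wanted = std::max(current_active, non_daemon_threads);
  return std::min(std::max(wanted, 1u), total_workers);
}

int main() {
  // With an 8-worker gang, 4 currently active workers, and 6 non-daemon
  // application threads, the sketch selects 6 workers.
  printf("active = %u\n", calc_active_workers_sketch(8, 4, 6));
  return 0;
}
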


1473     // Allocate and initialize a reference processor
1474     _ref_processor =
1475       new ReferenceProcessor(_reserved,                  // span
1476                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
1477                              ParallelGCThreads,          // mt processing degree
1478                              refs_discovery_is_mt(),     // mt discovery
1479                              ParallelGCThreads,          // mt discovery degree
1480                              refs_discovery_is_atomic(), // atomic_discovery
1481                              NULL);                      // is_alive_non_header
1482   }
1483 }
1484 
1485 const char* ParNewGeneration::name() const {
1486   return "par new generation";
1487 }
1488 
1489 void ParNewGeneration::restore_preserved_marks() {
1490   SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
1491   _preserved_marks_set.restore(&task_executor);
1492 }
1493 
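
The ReferenceProcessor constructor above derives its multi-threading arguments from two real HotSpot flags, ParallelRefProcEnabled and ParallelGCThreads: parallel reference processing is enabled only when the flag is set and more than one GC thread exists, while the processing and discovery degrees simply follow ParallelGCThreads. A minimal sketch of that derivation, with hypothetical names (RefProcConfig and make_config are not HotSpot's):

#include <cstdio>

// Illustrative only: models how the constructor arguments above are derived
// from the two flags. The struct and function are assumptions, not HotSpot API.
struct RefProcConfig {
  bool     mt_processing;
  unsigned processing_degree;
  unsigned discovery_degree;
};

static RefProcConfig make_config(bool parallel_ref_proc_enabled,
                                 unsigned parallel_gc_threads) {
  RefProcConfig cfg;
  // Multi-threaded reference processing only pays off with >1 GC thread,
  // hence the && guard in the constructor call above.
  cfg.mt_processing     = parallel_ref_proc_enabled && parallel_gc_threads > 1;
  cfg.processing_degree = parallel_gc_threads;
  cfg.discovery_degree  = parallel_gc_threads;
  return cfg;
}

int main() {
  RefProcConfig c = make_config(true, 4);
  printf("mt=%d degree=%u\n", c.mt_processing, c.processing_degree);
  return 0;
}
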

