
src/share/vm/gc/cms/parNewGeneration.cpp

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};
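
This proxy exists only to adapt the reference processor's EnqueueTask to the
AbstractGangTask interface that a WorkGang knows how to run; each gang worker
simply forwards its worker_id to the wrapped task. A minimal standalone sketch
of the same adapter idea, using made-up GangTask/EnqueueWork types in place of
the HotSpot classes:

    // Sketch only: GangTask and EnqueueWork are invented stand-ins for
    // AbstractGangTask and AbstractRefProcTaskExecutor::EnqueueTask.
    #include <cstdio>

    struct EnqueueWork {
      void work(unsigned worker_id) { std::printf("enqueue on worker %u\n", worker_id); }
    };

    class GangTask {
      const char* _name;
    public:
      explicit GangTask(const char* name) : _name(name) { }
      virtual ~GangTask() { }
      virtual void work(unsigned worker_id) = 0;
      const char* name() const { return _name; }
    };

    class EnqueueTaskProxy : public GangTask {
      EnqueueWork& _task;
    public:
      explicit EnqueueTaskProxy(EnqueueWork& task)
        : GangTask("parallel reference enqueue"), _task(task) { }
      virtual void work(unsigned worker_id) { _task.work(worker_id); }
    };

    int main() {
      EnqueueWork w;
      EnqueueTaskProxy proxy(w);
      for (unsigned i = 0; i < 3; i++) proxy.work(i);  // a WorkGang would call this from N threads
      return 0;
    }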

void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}
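
Note the bracketing pattern: the state set is sized to the active worker count
before the gang runs, then reset to an out-of-range count afterwards so that,
per the "bad value in debug if not reset" comment, stale use of the set is
caught in debug builds. A rough sketch of that discipline, with a hypothetical
StateSet standing in for ParScanThreadStateSet:

    // Sketch only: StateSet and run_phase are invented; the real code uses
    // ParScanThreadStateSet and WorkGang::run_task().
    #include <cassert>

    struct StateSet {
      unsigned _num_threads;
      StateSet() : _num_threads(0) { }
      void reset(unsigned n, bool promotion_failed) {
        _num_threads = n;
        (void)promotion_failed;  // the real reset also records promotion failure
      }
      void work_on(unsigned i) { assert(i < _num_threads); /* touch per-thread state */ }
    };

    void run_phase(StateSet& set, unsigned active_workers) {
      set.reset(active_workers, false);     // size for this parallel phase
      for (unsigned i = 0; i < active_workers; i++) {
        set.work_on(i);                     // the gang would do this from N threads
      }
      set.reset(0, false);                  // poison: stale use now asserts
    }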

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}
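
// Reference processing has reverted to single-threaded mode: flush the
// per-thread work state and have the heap save its marks.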
void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :


  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}
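
Two reporting paths converge here: promotion failures observed by the parallel
worker states are traced through thread_state_set, while failures recorded
directly in the generation-wide _promotion_failed_info (e.g. by single-threaded
code) are reported separately. A rough sketch of that split, with invented
FailedInfo/Tracer types rather than the HotSpot PromotionFailedInfo and
GC-tracer API:

    // Sketch only: FailedInfo and Tracer are invented stand-ins.
    #include <cstddef>
    #include <cstdio>

    struct FailedInfo {
      size_t _count;
      FailedInfo() : _count(0) { }
      bool has_failed() const { return _count > 0; }
    };

    struct Tracer {
      void report_promotion_failed(const FailedInfo& info) {
        std::printf("promotion failed (%zu objects)\n", info._count);
      }
    };

    void report_all(Tracer& tracer, const FailedInfo* per_worker, size_t n,
                    const FailedInfo& global) {
      for (size_t i = 0; i < n; i++) {           // parallel path
        if (per_worker[i].has_failed()) tracer.report_promotion_failed(per_worker[i]);
      }
      if (global.has_failed()) {                 // single-threaded path
        tracer.report_promotion_failed(global);
      }
    }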

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
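  // Decide how many of the gang's threads to use for this collection, based
  // on the gang's size, the previous active count, and the number of
  // non-daemon Java threads currently running.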
  uint active_workers =
       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                               workers->active_workers(),
                                               Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

