
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

rev 8068 : 6407976: GC worker number should be unsigned
Reviewed-by: jwilhelm
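
The rationale for the change, as a minimal standalone sketch (the names below are illustrative, not HotSpot APIs): a worker count is never negative, and once the per-worker entry points take a uint worker_id, keeping the count signed forces mixed-sign comparisons such as worker_id < active_workers, which draw -Wsign-compare warnings and invite wrap-around mistakes.

  #include <cstdio>

  // Illustrative stand-in, not HotSpot code: models the int -> uint change.
  static unsigned g_work_done = 0;

  static void do_work(unsigned worker_id) {
    (void)worker_id;
    ++g_work_done;
  }

  static void run_workers(unsigned active_workers) {  // was: int
    // With a signed count, this comparison would mix signednesses,
    // because worker ids are unsigned (work(uint worker_id)).
    for (unsigned worker_id = 0; worker_id < active_workers; ++worker_id) {
      do_work(worker_id);
    }
  }

  int main() {
    run_workers(4u);
    printf("workers run: %u\n", g_work_done);
    return 0;
  }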

--- Old version ---

 290   }
 291 }
 292 
 293 class ParScanThreadStateSet: private ResourceArray {
 294 public:
 295   // Initializes states for the specified number of threads;
 296   ParScanThreadStateSet(int                     num_threads,
 297                         Space&                  to_space,
 298                         ParNewGeneration&       gen,
 299                         Generation&             old_gen,
 300                         ObjToScanQueueSet&      queue_set,
 301                         Stack<oop, mtGC>*       overflow_stacks_,
 302                         size_t                  desired_plab_sz,
 303                         ParallelTaskTerminator& term);
 304 
 305   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 306 
 307   inline ParScanThreadState& thread_state(int i);
 308 
 309   void trace_promotion_failed(const YoungGCTracer* gc_tracer);
 310   void reset(int active_workers, bool promotion_failed);
 311   void flush();
 312 
 313   #if TASKQUEUE_STATS
 314   static void
 315     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 316   void print_termination_stats(outputStream* const st = gclog_or_tty);
 317   static void
 318     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 319   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 320   void reset_stats();
 321   #endif // TASKQUEUE_STATS
 322 
 323 private:
 324   ParallelTaskTerminator& _term;
 325   ParNewGeneration&       _gen;
 326   Generation&             _old_gen;
 327  public:
 328   bool is_valid(int id) const { return id < length(); }
 329   ParallelTaskTerminator* terminator() { return &_term; }
 330 };


 347         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
 348                            overflow_stacks, desired_plab_sz, term);
 349   }
 350 }
 351 
 352 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 353 {
 354   assert(i >= 0 && i < length(), "sanity check!");
 355   return ((ParScanThreadState*)_data)[i];
 356 }
 357 
 358 void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
 359   for (int i = 0; i < length(); ++i) {
 360     if (thread_state(i).promotion_failed()) {
 361       gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
 362       thread_state(i).promotion_failed_info().reset();
 363     }
 364   }
 365 }
 366 
 367 void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
 368 {
 369   _term.reset_for_reuse(active_threads);
 370   if (promotion_failed) {
 371     for (int i = 0; i < length(); ++i) {
 372       thread_state(i).print_promotion_failure_size();
 373     }
 374   }
 375 }
 376 
 377 #if TASKQUEUE_STATS
 378 void
 379 ParScanThreadState::reset_stats()
 380 {
 381   taskqueue_stats().reset();
 382   _term_attempts = 0;
 383   _overflow_refills = 0;
 384   _overflow_refill_objs = 0;
 385 }
 386 
 387 void ParScanThreadStateSet::reset_stats()


 565     par_scan_state()->start_term_time();
 566     if (terminator()->offer_termination()) break;
 567     par_scan_state()->end_term_time();
 568   }
 569   assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
 570          "Broken overflow list?");
 571   // Finish the last termination pause.
 572   par_scan_state()->end_term_time();
 573 }
 574 
 575 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
 576                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 577     AbstractGangTask("ParNewGeneration collection"),
 578     _gen(gen), _old_gen(old_gen),
 579     _young_old_boundary(young_old_boundary),
 580     _state_set(state_set)
 581   {}
 582 
 583 // Reset the terminator for the given number of
 584 // active threads.
 585 void ParNewGenTask::set_for_termination(int active_workers) {
 586   _state_set->reset(active_workers, _gen->promotion_failed());
 587   // Should the heap be passed in?  There's only 1 for now so
 588   // grab it instead.
 589   GenCollectedHeap* gch = GenCollectedHeap::heap();
 590   gch->set_n_termination(active_workers);
 591 }
 592 
 593 void ParNewGenTask::work(uint worker_id) {
 594   GenCollectedHeap* gch = GenCollectedHeap::heap();
 595   // Since this is being done in a separate thread, need new resource
 596   // and handle marks.
 597   ResourceMark rm;
 598   HandleMark hm;
 599 
 600   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 601   assert(_state_set->is_valid(worker_id), "Should not have been called");
 602 
 603   par_scan_state.set_young_old_boundary(_young_old_boundary);
 604 
 605   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),


 748         _rs->write_ref_field_gc_par(p, obj);
 749       }
 750     }
 751   }
 752 }
 753 
 754 void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
 755 void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
 756 
 757 class ParNewRefProcTaskProxy: public AbstractGangTask {
 758   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 759 public:
 760   ParNewRefProcTaskProxy(ProcessTask& task,
 761                          ParNewGeneration& gen,
 762                          Generation& old_gen,
 763                          HeapWord* young_old_boundary,
 764                          ParScanThreadStateSet& state_set);
 765 
 766 private:
 767   virtual void work(uint worker_id);
 768   virtual void set_for_termination(int active_workers) {
 769     _state_set.terminator()->reset_for_reuse(active_workers);
 770   }
 771 private:
 772   ParNewGeneration&      _gen;
 773   ProcessTask&           _task;
 774   Generation&            _old_gen;
 775   HeapWord*              _young_old_boundary;
 776   ParScanThreadStateSet& _state_set;
 777 };
 778 
 779 ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
 780                                                ParNewGeneration& gen,
 781                                                Generation& old_gen,
 782                                                HeapWord* young_old_boundary,
 783                                                ParScanThreadStateSet& state_set)
 784   : AbstractGangTask("ParNewGeneration parallel reference processing"),
 785     _gen(gen),
 786     _task(task),
 787     _old_gen(old_gen),
 788     _young_old_boundary(young_old_boundary),


 898   }
 899   // Reset the PromotionFailureALot counters.
 900   NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
 901 }
 902 
 903 void ParNewGeneration::collect(bool   full,
 904                                bool   clear_all_soft_refs,
 905                                size_t size,
 906                                bool   is_tlab) {
 907   assert(full || size > 0, "otherwise we don't want to collect");
 908 
 909   GenCollectedHeap* gch = GenCollectedHeap::heap();
 910 
 911   _gc_timer->register_gc_start();
 912 
 913   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 914     "not a CMS generational heap");
 915   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 916   FlexibleWorkGang* workers = gch->workers();
 917   assert(workers != NULL, "Need workgang for parallel work");
 918   int active_workers =
 919       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 920                                    workers->active_workers(),
 921                                    Threads::number_of_non_daemon_threads());
 922   workers->set_active_workers(active_workers);
 923   _old_gen = gch->old_gen();
 924 
 925   // If the next generation is too full to accommodate worst-case promotion
 926   // from this generation, pass on collection; let the next generation
 927   // do it.
 928   if (!collection_attempt_is_safe()) {
 929     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 930     return;
 931   }
 932   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 933 
 934   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 935   gch->trace_heap_before_gc(gc_tracer());
 936 
 937   init_assuming_no_promotion_failure();
 938 
 939   if (UseAdaptiveSizePolicy) {
 940     set_survivor_overflow(false);
 941     size_policy->minor_collection_begin();
 942   }
 943 
 944   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
 945   // Capture heap used before collection (for printing).
 946   size_t gch_prev_used = gch->used();
 947 
 948   age_table()->clear();
 949   to()->clear(SpaceDecorator::Mangle);
 950 
 951   gch->save_marks();
 952   assert(workers != NULL, "Need parallel worker threads.");
 953   int n_workers = active_workers;
 954 
 955   // Set the correct parallelism (number of queues) in the reference processor
 956   ref_processor()->set_active_mt_degree(n_workers);
 957 
 958   // Always set the terminator for the active number of workers
 959   // because only those workers go through the termination protocol.
 960   ParallelTaskTerminator _term(n_workers, task_queues());
 961   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 962                                          *to(), *this, *_old_gen, *task_queues(),
 963                                          _overflow_stacks, desired_plab_sz(), _term);
 964 
 965   ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
 966   gch->set_par_threads(n_workers);
 967   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 968   // It turns out that even when we're using 1 thread, doing the work in a
 969   // separate thread causes wide variance in run times.  We can't help this
 970   // in the multi-threaded case, but we special-case n=1 here to get
 971   // repeatable measurements of the 1-thread overhead of the parallel code.
 972   if (n_workers > 1) {
 973     StrongRootsScope srs;

--- New version (rev 8068) ---

 290   }
 291 }
 292 
 293 class ParScanThreadStateSet: private ResourceArray {
 294 public:
 295   // Initializes states for the specified number of threads;
 296   ParScanThreadStateSet(int                     num_threads,
 297                         Space&                  to_space,
 298                         ParNewGeneration&       gen,
 299                         Generation&             old_gen,
 300                         ObjToScanQueueSet&      queue_set,
 301                         Stack<oop, mtGC>*       overflow_stacks_,
 302                         size_t                  desired_plab_sz,
 303                         ParallelTaskTerminator& term);
 304 
 305   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 306 
 307   inline ParScanThreadState& thread_state(int i);
 308 
 309   void trace_promotion_failed(const YoungGCTracer* gc_tracer);
 310   void reset(uint active_workers, bool promotion_failed);
 311   void flush();
 312 
 313   #if TASKQUEUE_STATS
 314   static void
 315     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 316   void print_termination_stats(outputStream* const st = gclog_or_tty);
 317   static void
 318     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 319   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 320   void reset_stats();
 321   #endif // TASKQUEUE_STATS
 322 
 323 private:
 324   ParallelTaskTerminator& _term;
 325   ParNewGeneration&       _gen;
 326   Generation&             _old_gen;
 327  public:
 328   bool is_valid(int id) const { return id < length(); }
 329   ParallelTaskTerminator* terminator() { return &_term; }
 330 };
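
One seam the change leaves visible in this class: reset() now takes uint, but thread_state() and is_valid() still take int, while callers such as ParNewGenTask::work(uint worker_id) below pass an unsigned id. A hedged sketch of a warning-free bounds check at that kind of boundary (illustrative names, not the HotSpot code):

  #include <cassert>

  // Illustrative only: checking an unsigned worker id against a signed
  // length (ResourceArray::length() returns int in this codebase).
  static bool is_valid_worker(unsigned worker_id, int length) {
    assert(length >= 0 && "array length is never negative");
    // Cast the side known to be non-negative, not the unsigned id.
    return worker_id < static_cast<unsigned>(length);
  }

  int main() {
    return is_valid_worker(3u, 4) ? 0 : 1;
  }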


 347         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
 348                            overflow_stacks, desired_plab_sz, term);
 349   }
 350 }
 351 
 352 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 353 {
 354   assert(i >= 0 && i < length(), "sanity check!");
 355   return ((ParScanThreadState*)_data)[i];
 356 }
 357 
 358 void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
 359   for (int i = 0; i < length(); ++i) {
 360     if (thread_state(i).promotion_failed()) {
 361       gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
 362       thread_state(i).promotion_failed_info().reset();
 363     }
 364   }
 365 }
 366 
 367 void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
 368 {
 369   _term.reset_for_reuse(active_threads);
 370   if (promotion_failed) {
 371     for (int i = 0; i < length(); ++i) {
 372       thread_state(i).print_promotion_failure_size();
 373     }
 374   }
 375 }
 376 
 377 #if TASKQUEUE_STATS
 378 void
 379 ParScanThreadState::reset_stats()
 380 {
 381   taskqueue_stats().reset();
 382   _term_attempts = 0;
 383   _overflow_refills = 0;
 384   _overflow_refill_objs = 0;
 385 }
 386 
 387 void ParScanThreadStateSet::reset_stats()
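
reset() above forwards the active worker count straight to ParallelTaskTerminator::reset_for_reuse(). As a simplified model, assuming (an assumption, not shown in this webrev) that termination completes only once every registered worker has offered, re-arming per phase matters because the active count can change between collections:

  #include <atomic>

  // Simplified model of a reusable termination protocol. The real
  // ParallelTaskTerminator also re-checks task queues and can retract an
  // offer when stealable work reappears; this sketch omits that.
  class ToyTerminator {
    unsigned _n_threads;
    std::atomic<unsigned> _offered;
  public:
    explicit ToyTerminator(unsigned n) : _n_threads(n), _offered(0) {}

    // Re-arm between phases: the active worker count (uint, per this
    // change) can differ from one collection to the next.
    void reset_for_reuse(unsigned active_workers) {
      _n_threads = active_workers;
      _offered.store(0, std::memory_order_release);
    }

    // Returns once every active worker has offered termination.
    bool offer_termination() {
      _offered.fetch_add(1, std::memory_order_acq_rel);
      while (_offered.load(std::memory_order_acquire) < _n_threads) {
        // spin; real code would yield and poll for new work
      }
      return true;
    }
  };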


 565     par_scan_state()->start_term_time();
 566     if (terminator()->offer_termination()) break;
 567     par_scan_state()->end_term_time();
 568   }
 569   assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
 570          "Broken overflow list?");
 571   // Finish the last termination pause.
 572   par_scan_state()->end_term_time();
 573 }
 574 
 575 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
 576                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 577     AbstractGangTask("ParNewGeneration collection"),
 578     _gen(gen), _old_gen(old_gen),
 579     _young_old_boundary(young_old_boundary),
 580     _state_set(state_set)
 581   {}
 582 
 583 // Reset the terminator for the given number of
 584 // active threads.
 585 void ParNewGenTask::set_for_termination(uint active_workers) {
 586   _state_set->reset(active_workers, _gen->promotion_failed());
 587   // Should the heap be passed in?  There's only 1 for now so
 588   // grab it instead.
 589   GenCollectedHeap* gch = GenCollectedHeap::heap();
 590   gch->set_n_termination(active_workers);
 591 }
 592 
 593 void ParNewGenTask::work(uint worker_id) {
 594   GenCollectedHeap* gch = GenCollectedHeap::heap();
 595   // Since this is being done in a separate thread, need new resource
 596   // and handle marks.
 597   ResourceMark rm;
 598   HandleMark hm;
 599 
 600   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 601   assert(_state_set->is_valid(worker_id), "Should not have been called");
 602 
 603   par_scan_state.set_young_old_boundary(_young_old_boundary);
 604 
 605   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
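
work(uint worker_id) above is the per-worker entry point; each gang thread uses its id to index private state via _state_set->thread_state(worker_id). A stand-in sketch of that dispatch shape using standard threads (GangWorker itself is not part of this webrev, so this is a model, not the HotSpot code):

  #include <cstdio>
  #include <thread>
  #include <vector>

  // Stand-in for the gang dispatch: every worker enters work() with a
  // distinct unsigned id and touches only its own slot, so no locking
  // is needed, mirroring the per-thread ParScanThreadState above.
  struct ToyGangTask {
    std::vector<unsigned> per_worker;  // one slot per worker id
    explicit ToyGangTask(unsigned n) : per_worker(n, 0) {}
    void work(unsigned worker_id) { per_worker[worker_id] += 1; }
  };

  int main() {
    const unsigned n_workers = 4;
    ToyGangTask task(n_workers);
    std::vector<std::thread> gang;
    for (unsigned id = 0; id < n_workers; ++id) {
      gang.emplace_back([&task, id] { task.work(id); });
    }
    for (std::thread& t : gang) {
      t.join();
    }
    for (unsigned id = 0; id < n_workers; ++id) {
      printf("worker %u ran %u unit(s)\n", id, task.per_worker[id]);
    }
    return 0;
  }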


 748         _rs->write_ref_field_gc_par(p, obj);
 749       }
 750     }
 751   }
 752 }
 753 
 754 void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
 755 void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
 756 
 757 class ParNewRefProcTaskProxy: public AbstractGangTask {
 758   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 759 public:
 760   ParNewRefProcTaskProxy(ProcessTask& task,
 761                          ParNewGeneration& gen,
 762                          Generation& old_gen,
 763                          HeapWord* young_old_boundary,
 764                          ParScanThreadStateSet& state_set);
 765 
 766 private:
 767   virtual void work(uint worker_id);
 768   virtual void set_for_termination(uint active_workers) {
 769     _state_set.terminator()->reset_for_reuse(active_workers);
 770   }
 771 private:
 772   ParNewGeneration&      _gen;
 773   ProcessTask&           _task;
 774   Generation&            _old_gen;
 775   HeapWord*              _young_old_boundary;
 776   ParScanThreadStateSet& _state_set;
 777 };
 778 
 779 ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
 780                                                ParNewGeneration& gen,
 781                                                Generation& old_gen,
 782                                                HeapWord* young_old_boundary,
 783                                                ParScanThreadStateSet& state_set)
 784   : AbstractGangTask("ParNewGeneration parallel reference processing"),
 785     _gen(gen),
 786     _task(task),
 787     _old_gen(old_gen),
 788     _young_old_boundary(young_old_boundary),
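
set_for_termination(uint) in the proxy overrides a virtual declared in the task hierarchy, which is one reason this signedness change has to land on base and overrider together: a would-be override whose parameter type differs does not override at all, it silently hides. An illustration with invented class names:

  // Illustrative class names, not HotSpot's.
  struct BaseTask {
    virtual void set_for_termination(unsigned active_workers) { (void)active_workers; }
    virtual ~BaseTask() {}
  };

  struct MatchingTask : BaseTask {
    // Same parameter type as the base: this is a real override.
    virtual void set_for_termination(unsigned active_workers) { (void)active_workers; }
  };

  struct MismatchedTask : BaseTask {
    // int != unsigned: this declares a brand-new virtual and HIDES the
    // base function instead of overriding it (C++11's 'override'
    // keyword would turn this mistake into a compile error).
    virtual void set_for_termination(int active_workers) { (void)active_workers; }
  };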


 898   }
 899   // Reset the PromotionFailureALot counters.
 900   NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
 901 }
 902 
 903 void ParNewGeneration::collect(bool   full,
 904                                bool   clear_all_soft_refs,
 905                                size_t size,
 906                                bool   is_tlab) {
 907   assert(full || size > 0, "otherwise we don't want to collect");
 908 
 909   GenCollectedHeap* gch = GenCollectedHeap::heap();
 910 
 911   _gc_timer->register_gc_start();
 912 
 913   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 914     "not a CMS generational heap");
 915   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 916   FlexibleWorkGang* workers = gch->workers();
 917   assert(workers != NULL, "Need workgang for parallel work");
 918   uint active_workers =
 919       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 920                                    workers->active_workers(),
 921                                    Threads::number_of_non_daemon_threads());
 922   workers->set_active_workers(active_workers);
 923   _old_gen = gch->old_gen();
 924 
 925   // If the next generation is too full to accommodate worst-case promotion
 926   // from this generation, pass on collection; let the next generation
 927   // do it.
 928   if (!collection_attempt_is_safe()) {
 929     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 930     return;
 931   }
 932   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 933 
 934   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 935   gch->trace_heap_before_gc(gc_tracer());
 936 
 937   init_assuming_no_promotion_failure();
 938 
 939   if (UseAdaptiveSizePolicy) {
 940     set_survivor_overflow(false);
 941     size_policy->minor_collection_begin();
 942   }
 943 
 944   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
 945   // Capture heap used before collection (for printing).
 946   size_t gch_prev_used = gch->used();
 947 
 948   age_table()->clear();
 949   to()->clear(SpaceDecorator::Mangle);
 950 
 951   gch->save_marks();
 952   assert(workers != NULL, "Need parallel worker threads.");
 953   uint n_workers = active_workers;
 954 
 955   // Set the correct parallelism (number of queues) in the reference processor
 956   ref_processor()->set_active_mt_degree(n_workers);
 957 
 958   // Always set the terminator for the active number of workers
 959   // because only those workers go through the termination protocol.
 960   ParallelTaskTerminator _term(n_workers, task_queues());
 961   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 962                                          *to(), *this, *_old_gen, *task_queues(),
 963                                          _overflow_stacks, desired_plab_sz(), _term);
 964 
 965   ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
 966   gch->set_par_threads(n_workers);
 967   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 968   // It turns out that even when we're using 1 thread, doing the work in a
 969   // separate thread causes wide variance in run times.  We can't help this
 970   // in the multi-threaded case, but we special-case n=1 here to get
 971   // repeatable measurements of the 1-thread overhead of the parallel code.
 972   if (n_workers > 1) {
 973     StrongRootsScope srs;
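
collect() above derives a single unsigned active_workers and threads it through the work gang, the reference processor, the terminator, and the heap. A deliberately rough stand-in for the clamping a calc_active_workers-style policy performs (the real AdaptiveSizePolicy heuristic is more involved and is not shown in this webrev):

  #include <algorithm>
  #include <cstdio>

  // Assumption-heavy sketch: at least one worker, never more than the
  // gang owns, scaled by application demand. The real policy also
  // smooths against the previous active count, which this ignores.
  static unsigned calc_active_workers_sketch(unsigned total_workers,
                                             unsigned prev_active,
                                             unsigned non_daemon_threads) {
    (void)prev_active;  // ignored in this sketch
    unsigned demand = std::max(non_daemon_threads, 1u);
    return std::max(1u, std::min(total_workers, demand));
  }

  int main() {
    // 8-thread gang, 3 non-daemon application threads -> 3 active workers.
    printf("%u\n", calc_active_workers_sketch(8u, 4u, 3u));
    return 0;
  }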

