src/share/vm/gc_implementation/parNew/parNewGeneration.cpp (original version)

 291   }
 292 }
 293 
 294 class ParScanThreadStateSet: private ResourceArray {
 295 public:
 296   // Initializes states for the specified number of threads.
 297   ParScanThreadStateSet(int                     num_threads,
 298                         Space&                  to_space,
 299                         ParNewGeneration&       gen,
 300                         Generation&             old_gen,
 301                         ObjToScanQueueSet&      queue_set,
 302                         Stack<oop, mtGC>*       overflow_stacks_,
 303                         size_t                  desired_plab_sz,
 304                         ParallelTaskTerminator& term);
 305 
 306   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 307 
 308   inline ParScanThreadState& thread_state(int i);
 309 
 310   void trace_promotion_failed(const YoungGCTracer* gc_tracer);
 311   void reset(int active_workers, bool promotion_failed);
 312   void flush();
 313 
 314   #if TASKQUEUE_STATS
 315   static void
 316     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 317   void print_termination_stats(outputStream* const st = gclog_or_tty);
 318   static void
 319     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 320   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 321   void reset_stats();
 322   #endif // TASKQUEUE_STATS
 323 
 324 private:
 325   ParallelTaskTerminator& _term;
 326   ParNewGeneration&       _gen;
 327   Generation&             _old_gen;
 328  public:
 329   bool is_valid(int id) const { return id < length(); }
 330   ParallelTaskTerminator* terminator() { return &_term; }
 331 };
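
The class above packages one mutable ParScanThreadState per GC worker, allocated in a ResourceArray and indexed by worker id, so workers never contend on shared scan state. A minimal, self-contained sketch of that pattern in plain C++ (ScanState and ScanStateSet are illustrative stand-ins, not the HotSpot types):

    #include <cassert>
    #include <vector>

    struct ScanState {               // stand-in for ParScanThreadState
      int    worker_id;
      size_t objects_scanned;
    };

    class ScanStateSet {             // stand-in for ParScanThreadStateSet
    public:
      explicit ScanStateSet(int num_threads) : _states(num_threads) {
        for (int i = 0; i < num_threads; ++i) {
          _states[i].worker_id = i;
          _states[i].objects_scanned = 0;
        }
      }
      ScanState& thread_state(int i) {
        assert(0 <= i && i < (int)_states.size() && "sanity check");
        return _states[i];
      }
      bool is_valid(int id) const { return 0 <= id && id < (int)_states.size(); }
    private:
      std::vector<ScanState> _states;  // one private state per worker
    };
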


 348         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
 349                            overflow_stacks, desired_plab_sz, term);
 350   }
 351 }
 352 
 353 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 354 {
 355   assert(i >= 0 && i < length(), "sanity check!");
 356   return ((ParScanThreadState*)_data)[i];
 357 }
 358 
 359 void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
 360   for (int i = 0; i < length(); ++i) {
 361     if (thread_state(i).promotion_failed()) {
 362       gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
 363       thread_state(i).promotion_failed_info().reset();
 364     }
 365   }
 366 }
 367 
 368 void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
 369 {
 370   _term.reset_for_reuse(active_threads);
 371   if (promotion_failed) {
 372     for (int i = 0; i < length(); ++i) {
 373       thread_state(i).print_promotion_failure_size();
 374     }
 375   }
 376 }
 377 
 378 #if TASKQUEUE_STATS
 379 void
 380 ParScanThreadState::reset_stats()
 381 {
 382   taskqueue_stats().reset();
 383   _term_attempts = 0;
 384   _overflow_refills = 0;
 385   _overflow_refill_objs = 0;
 386 }
 387 
 388 void ParScanThreadStateSet::reset_stats()
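
ParScanThreadState::reset_stats() above (and the set-level version whose body is cut off here) follow the same conditional-compilation pattern: the counters, and all code that touches them, exist only when TASKQUEUE_STATS is enabled, so product builds pay nothing for the instrumentation. A rough sketch of the shape (the macro value here is an assumption for the sketch; HotSpot configures it per build type):

    #ifndef TASKQUEUE_STATS
    #define TASKQUEUE_STATS 1        // assumed on here; product builds use 0
    #endif

    struct WorkerStats {             // stand-in for the per-thread counters
    #if TASKQUEUE_STATS
      size_t term_attempts;
      size_t overflow_refills;
      void reset_stats() { term_attempts = 0; overflow_refills = 0; }
    #else
      void reset_stats() {}          // empty and inlined away when stats are off
    #endif
    };
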


 566     par_scan_state()->start_term_time();
 567     if (terminator()->offer_termination()) break;
 568     par_scan_state()->end_term_time();
 569   }
 570   assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
 571          "Broken overflow list?");
 572   // Finish the last termination pause.
 573   par_scan_state()->end_term_time();
 574 }
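
The loop ending above is the work-stealing termination handshake: a worker that runs out of work times a termination attempt; offer_termination() returns true only once every worker is idle, and otherwise the worker resumes stealing. A simplified, self-contained sketch of that control flow (the template parameters are hypothetical stand-ins for the queue, terminator, and state types):

    template <class Queue, class Terminator, class State>
    void scan_until_done(Queue& q, Terminator& terminator, State& state) {
      while (true) {
        while (q.steal_and_process()) {
          // work found: keep scanning/copying objects
        }
        state.start_term_time();             // time this termination attempt
        if (terminator.offer_termination()) {
          break;                             // all workers idle: phase complete
        }
        state.end_term_time();               // a peer produced work; steal again
      }
      state.end_term_time();                 // close out the final, successful attempt
    }

Note that the last attempt is the one that succeeds, which is why the real code finishes with one more end_term_time() after the loop.
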
 575 
 576 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
 577                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 578     AbstractGangTask("ParNewGeneration collection"),
 579     _gen(gen), _old_gen(old_gen),
 580     _young_old_boundary(young_old_boundary),
 581     _state_set(state_set)
 582   {}
 583 
 584 // Reset the terminator for the given number of
 585 // active threads.
 586 void ParNewGenTask::set_for_termination(int active_workers) {
 587   _state_set->reset(active_workers, _gen->promotion_failed());
 588   // Should the heap be passed in?  There's only 1 for now so
 589   // grab it instead.
 590   GenCollectedHeap* gch = GenCollectedHeap::heap();
 591   gch->set_n_termination(active_workers);
 592 }
 593 
 594 void ParNewGenTask::work(uint worker_id) {
 595   GenCollectedHeap* gch = GenCollectedHeap::heap();
 596   // Since this is being done in a separate thread, need new resource
 597   // and handle marks.
 598   ResourceMark rm;
 599   HandleMark hm;
 600 
 601   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 602   assert(_state_set->is_valid(worker_id), "Should not have been called");
 603 
 604   par_scan_state.set_young_old_boundary(_young_old_boundary);
 605 
 606   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),


 749         _rs->write_ref_field_gc_par(p, obj);
 750       }
 751     }
 752   }
 753 }
 754 
 755 void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
 756 void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
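
The paired do_oop overloads exist because a heap running with compressed oops stores references as 32-bit narrowOop fields; every closure therefore accepts both widths and forwards to a single template worker. A sketch of the pattern with simplified types (oop and narrowOop here are illustrative typedefs, not the real definitions):

    typedef unsigned int narrowOop;   // compressed 32-bit reference (simplified)
    class oopDesc; typedef oopDesc* oop;

    class ExampleScanClosure {
    public:
      void do_oop(oop* p)       { do_oop_work(p); }
      void do_oop(narrowOop* p) { do_oop_work(p); }
    private:
      template <class T> void do_oop_work(T* p) {
        // decode *p if T is narrowOop, scan/copy the referent, then
        // re-encode and store the forwarded address back through p
      }
    };
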
 757 
 758 class ParNewRefProcTaskProxy: public AbstractGangTask {
 759   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 760 public:
 761   ParNewRefProcTaskProxy(ProcessTask& task,
 762                          ParNewGeneration& gen,
 763                          Generation& old_gen,
 764                          HeapWord* young_old_boundary,
 765                          ParScanThreadStateSet& state_set);
 766 
 767 private:
 768   virtual void work(uint worker_id);
 769   virtual void set_for_termination(int active_workers) {
 770     _state_set.terminator()->reset_for_reuse(active_workers);
 771   }
 772 private:
 773   ParNewGeneration&      _gen;
 774   ProcessTask&           _task;
 775   Generation&            _old_gen;
 776   HeapWord*              _young_old_boundary;
 777   ParScanThreadStateSet& _state_set;
 778 };
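
ParNewRefProcTaskProxy is an adapter: reference processing produces a ProcessTask, and the proxy re-packages it as an AbstractGangTask so the existing worker gang can run it against the same per-worker scan states. The shape, reduced to its essentials (both base types below are simplified stand-ins; the real ProcessTask::work also takes per-worker closures):

    struct ProcessTask {               // stand-in for the ref-processing task
      virtual void work(unsigned worker_id) = 0;
      virtual ~ProcessTask() {}
    };
    struct GangTask {                  // stand-in for AbstractGangTask
      virtual void work(unsigned worker_id) = 0;
      virtual ~GangTask() {}
    };

    class TaskProxy : public GangTask {
    public:
      explicit TaskProxy(ProcessTask& task) : _task(task) {}
      virtual void work(unsigned worker_id) {
        // the real proxy also hands the task per-worker is_alive/keep_alive
        // closures built from the state set before delegating
        _task.work(worker_id);
      }
    private:
      ProcessTask& _task;              // the reference-processing work adapted
    };
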
 779 
 780 ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
 781                                                ParNewGeneration& gen,
 782                                                Generation& old_gen,
 783                                                HeapWord* young_old_boundary,
 784                                                ParScanThreadStateSet& state_set)
 785   : AbstractGangTask("ParNewGeneration parallel reference processing"),
 786     _gen(gen),
 787     _task(task),
 788     _old_gen(old_gen),
 789     _young_old_boundary(young_old_boundary),


 895   if (_promotion_failed_info.has_failed()) {
 896     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 897   }
 898   // Reset the PromotionFailureALot counters.
 899   NOT_PRODUCT(gch->reset_promotion_should_fail();)
 900 }
 901 
 902 void ParNewGeneration::collect(bool   full,
 903                                bool   clear_all_soft_refs,
 904                                size_t size,
 905                                bool   is_tlab) {
 906   assert(full || size > 0, "otherwise we don't want to collect");
 907 
 908   GenCollectedHeap* gch = GenCollectedHeap::heap();
 909 
 910   _gc_timer->register_gc_start();
 911 
 912   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 913   FlexibleWorkGang* workers = gch->workers();
 914   assert(workers != NULL, "Need workgang for parallel work");
 915   int active_workers =
 916       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 917                                               workers->active_workers(),
 918                                               Threads::number_of_non_daemon_threads());
 919   workers->set_active_workers(active_workers);
 920   _old_gen = gch->old_gen();
 921 
 922   // If the next generation is too full to accommodate worst-case promotion
 923   // from this generation, pass on collection; let the next generation
 924   // do it.
 925   if (!collection_attempt_is_safe()) {
 926     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 927     return;
 928   }
 929   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 930 
 931   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 932   gch->trace_heap_before_gc(gc_tracer());
 933 
 934   init_assuming_no_promotion_failure();
 935 
 936   if (UseAdaptiveSizePolicy) {
 937     set_survivor_overflow(false);
 938     size_policy->minor_collection_begin();
 939   }
 940 
 941   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
 942   // Capture heap used before collection (for printing).
 943   size_t gch_prev_used = gch->used();
 944 
 945   age_table()->clear();
 946   to()->clear(SpaceDecorator::Mangle);
 947 
 948   gch->save_marks();
 949   assert(workers != NULL, "Need parallel worker threads.");
 950   int n_workers = active_workers;
 951 
 952   // Set the correct parallelism (number of queues) in the reference processor
 953   ref_processor()->set_active_mt_degree(n_workers);
 954 
 955   // Always set the terminator for the active number of workers
 956   // because only those workers go through the termination protocol.
 957   ParallelTaskTerminator _term(n_workers, task_queues());
 958   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 959                                          *to(), *this, *_old_gen, *task_queues(),
 960                                          _overflow_stacks, desired_plab_sz(), _term);
 961 
 962   ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
 963   gch->set_par_threads(n_workers);
 964   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 965   // It turns out that even when we're using 1 thread, doing the work in a
 966   // separate thread causes wide variance in run times.  We can't help this
 967   // in the multi-threaded case, but we special-case n=1 here to get
 968   // repeatable measurements of the 1-thread overhead of the parallel code.
 969   if (n_workers > 1) {
 970     StrongRootsScope srs;

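The setup in collect() above fixes the degree of parallelism once and threads it through every structure that must agree on it: the adaptively chosen worker count sizes the reference processor's queue set, the terminator, and the per-worker state set. A hypothetical sketch of what such a worker-count choice looks like (bounded by gang size and current application demand; this is not HotSpot's exact policy):

    #include <algorithm>

    unsigned choose_active_workers(unsigned total_workers,
                                   unsigned prev_active,
                                   unsigned non_daemon_threads) {
      unsigned wanted = std::max(prev_active, non_daemon_threads);
      return std::min(wanted, total_workers);   // never exceed the gang size
    }

Whatever count comes back is then used consistently: set_active_workers(), set_active_mt_degree(), the ParallelTaskTerminator constructor, and the ParScanThreadStateSet all see the same number, so only the workers that actually run take part in the termination protocol.
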

src/share/vm/gc_implementation/parNew/parNewGeneration.cpp (updated version)

 291   }
 292 }
 293 
 294 class ParScanThreadStateSet: private ResourceArray {
 295 public:
 296   // Initializes states for the specified number of threads.
 297   ParScanThreadStateSet(int                     num_threads,
 298                         Space&                  to_space,
 299                         ParNewGeneration&       gen,
 300                         Generation&             old_gen,
 301                         ObjToScanQueueSet&      queue_set,
 302                         Stack<oop, mtGC>*       overflow_stacks_,
 303                         size_t                  desired_plab_sz,
 304                         ParallelTaskTerminator& term);
 305 
 306   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 307 
 308   inline ParScanThreadState& thread_state(int i);
 309 
 310   void trace_promotion_failed(const YoungGCTracer* gc_tracer);
 311   void reset(uint active_workers, bool promotion_failed);
 312   void flush();
 313 
 314   #if TASKQUEUE_STATS
 315   static void
 316     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 317   void print_termination_stats(outputStream* const st = gclog_or_tty);
 318   static void
 319     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 320   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 321   void reset_stats();
 322   #endif // TASKQUEUE_STATS
 323 
 324 private:
 325   ParallelTaskTerminator& _term;
 326   ParNewGeneration&       _gen;
 327   Generation&             _old_gen;
 328  public:
 329   bool is_valid(int id) const { return id < length(); }
 330   ParallelTaskTerminator* terminator() { return &_term; }
 331 };


 348         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
 349                            overflow_stacks, desired_plab_sz, term);
 350   }
 351 }
 352 
 353 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 354 {
 355   assert(i >= 0 && i < length(), "sanity check!");
 356   return ((ParScanThreadState*)_data)[i];
 357 }
 358 
 359 void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
 360   for (int i = 0; i < length(); ++i) {
 361     if (thread_state(i).promotion_failed()) {
 362       gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
 363       thread_state(i).promotion_failed_info().reset();
 364     }
 365   }
 366 }
 367 
 368 void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
 369 {
 370   _term.reset_for_reuse(active_threads);
 371   if (promotion_failed) {
 372     for (int i = 0; i < length(); ++i) {
 373       thread_state(i).print_promotion_failure_size();
 374     }
 375   }
 376 }
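
Relative to the old frame, the change in this file is the signedness of the worker-count parameters: reset() here, and set_for_termination() below, now take uint, matching worker_id and the work-gang interfaces, which are already unsigned. A small stand-alone illustration of the mismatch this avoids (not HotSpot code):

    #include <cstdio>

    static void set_for_termination(unsigned active_workers) {
      std::printf("terminating with %u workers\n", active_workers);
    }

    int main() {
      int n = -1;                      // a plausible upstream bug
      // set_for_termination(n);       // int -> uint would turn -1 into 4294967295
      if (n >= 0) {                    // with unsigned APIs the check is explicit
        set_for_termination((unsigned)n);
      }
      return 0;
    }
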
 377 
 378 #if TASKQUEUE_STATS
 379 void
 380 ParScanThreadState::reset_stats()
 381 {
 382   taskqueue_stats().reset();
 383   _term_attempts = 0;
 384   _overflow_refills = 0;
 385   _overflow_refill_objs = 0;
 386 }
 387 
 388 void ParScanThreadStateSet::reset_stats()


 566     par_scan_state()->start_term_time();
 567     if (terminator()->offer_termination()) break;
 568     par_scan_state()->end_term_time();
 569   }
 570   assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
 571          "Broken overflow list?");
 572   // Finish the last termination pause.
 573   par_scan_state()->end_term_time();
 574 }
 575 
 576 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
 577                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 578     AbstractGangTask("ParNewGeneration collection"),
 579     _gen(gen), _old_gen(old_gen),
 580     _young_old_boundary(young_old_boundary),
 581     _state_set(state_set)
 582   {}
 583 
 584 // Reset the terminator for the given number of
 585 // active threads.
 586 void ParNewGenTask::set_for_termination(uint active_workers) {
 587   _state_set->reset(active_workers, _gen->promotion_failed());
 588   // Should the heap be passed in?  There's only 1 for now so
 589   // grab it instead.
 590   GenCollectedHeap* gch = GenCollectedHeap::heap();
 591   gch->set_n_termination(active_workers);
 592 }
 593 
 594 void ParNewGenTask::work(uint worker_id) {
 595   GenCollectedHeap* gch = GenCollectedHeap::heap();
 596   // Since this is being done in a separate thread, need new resource
 597   // and handle marks.
 598   ResourceMark rm;
 599   HandleMark hm;
 600 
 601   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 602   assert(_state_set->is_valid(worker_id), "Should not have been called");
 603 
 604   par_scan_state.set_young_old_boundary(_young_old_boundary);
 605 
 606   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),


 749         _rs->write_ref_field_gc_par(p, obj);
 750       }
 751     }
 752   }
 753 }
 754 
 755 void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
 756 void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
 757 
 758 class ParNewRefProcTaskProxy: public AbstractGangTask {
 759   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 760 public:
 761   ParNewRefProcTaskProxy(ProcessTask& task,
 762                          ParNewGeneration& gen,
 763                          Generation& old_gen,
 764                          HeapWord* young_old_boundary,
 765                          ParScanThreadStateSet& state_set);
 766 
 767 private:
 768   virtual void work(uint worker_id);
 769   virtual void set_for_termination(uint active_workers) {
 770     _state_set.terminator()->reset_for_reuse(active_workers);
 771   }
 772 private:
 773   ParNewGeneration&      _gen;
 774   ProcessTask&           _task;
 775   Generation&            _old_gen;
 776   HeapWord*              _young_old_boundary;
 777   ParScanThreadStateSet& _state_set;
 778 };
 779 
 780 ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
 781                                                ParNewGeneration& gen,
 782                                                Generation& old_gen,
 783                                                HeapWord* young_old_boundary,
 784                                                ParScanThreadStateSet& state_set)
 785   : AbstractGangTask("ParNewGeneration parallel reference processing"),
 786     _gen(gen),
 787     _task(task),
 788     _old_gen(old_gen),
 789     _young_old_boundary(young_old_boundary),


 895   if (_promotion_failed_info.has_failed()) {
 896     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 897   }
 898   // Reset the PromotionFailureALot counters.
 899   NOT_PRODUCT(gch->reset_promotion_should_fail();)
 900 }
 901 
 902 void ParNewGeneration::collect(bool   full,
 903                                bool   clear_all_soft_refs,
 904                                size_t size,
 905                                bool   is_tlab) {
 906   assert(full || size > 0, "otherwise we don't want to collect");
 907 
 908   GenCollectedHeap* gch = GenCollectedHeap::heap();
 909 
 910   _gc_timer->register_gc_start();
 911 
 912   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 913   FlexibleWorkGang* workers = gch->workers();
 914   assert(workers != NULL, "Need workgang for parallel work");
 915   uint active_workers =
 916       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 917                                               workers->active_workers(),
 918                                               Threads::number_of_non_daemon_threads());
 919   workers->set_active_workers(active_workers);
 920   _old_gen = gch->old_gen();
 921 
 922   // If the next generation is too full to accommodate worst-case promotion
 923   // from this generation, pass on collection; let the next generation
 924   // do it.
 925   if (!collection_attempt_is_safe()) {
 926     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 927     return;
 928   }
 929   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 930 
 931   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 932   gch->trace_heap_before_gc(gc_tracer());
 933 
 934   init_assuming_no_promotion_failure();
 935 
 936   if (UseAdaptiveSizePolicy) {
 937     set_survivor_overflow(false);
 938     size_policy->minor_collection_begin();
 939   }
 940 
 941   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
 942   // Capture heap used before collection (for printing).
 943   size_t gch_prev_used = gch->used();
 944 
 945   age_table()->clear();
 946   to()->clear(SpaceDecorator::Mangle);
 947 
 948   gch->save_marks();
 949   assert(workers != NULL, "Need parallel worker threads.");
 950   uint n_workers = active_workers;
 951 
 952   // Set the correct parallelism (number of queues) in the reference processor
 953   ref_processor()->set_active_mt_degree(n_workers);
 954 
 955   // Always set the terminator for the active number of workers
 956   // because only those workers go through the termination protocol.
 957   ParallelTaskTerminator _term(n_workers, task_queues());
 958   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 959                                          *to(), *this, *_old_gen, *task_queues(),
 960                                          _overflow_stacks, desired_plab_sz(), _term);
 961 
 962   ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
 963   gch->set_par_threads(n_workers);
 964   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 965   // It turns out that even when we're using 1 thread, doing the work in a
 966   // separate thread causes wide variance in run times.  We can't help this
 967   // in the multi-threaded case, but we special-case n=1 here to get
 968   // repeatable measurements of the 1-thread overhead of the parallel code.
 969   if (n_workers > 1) {
 970     StrongRootsScope srs;

