< prev index next >

src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

Print this page
rev 8138 : 8077842: Remove the level parameter passed around in GenCollectedHeap
Reviewed-by:


  44 #include "memory/resourceArea.hpp"
  45 #include "memory/strongRootsScope.hpp"
  46 #include "memory/space.hpp"
  47 #include "oops/objArrayOop.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "runtime/atomic.inline.hpp"
  50 #include "runtime/handles.hpp"
  51 #include "runtime/handles.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/thread.inline.hpp"
  54 #include "utilities/copy.hpp"
  55 #include "utilities/globalDefinitions.hpp"
  56 #include "utilities/stack.inline.hpp"
  57 #include "utilities/workgroup.hpp"
  58 
  59 #ifdef _MSC_VER
  60 #pragma warning( push )
  61 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  62 #endif
// Per-worker scavenge state for one ParNew collection: binds this worker's
// to-space PLAB, its task queue, an optional per-thread overflow stack, the
// local age table, and the scan/evacuate closures used while copying.
  63 ParScanThreadState::ParScanThreadState(Space* to_space_,
  64                                        ParNewGeneration* gen_,
  65                                        Generation* old_gen_,
  66                                        int thread_num_,
  67                                        ObjToScanQueueSet* work_queue_set_,
  68                                        Stack<oop, mtGC>* overflow_stacks_,
  69                                        size_t desired_plab_sz_,
  70                                        ParallelTaskTerminator& term_) :
  71   _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  72   _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  73   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  74   _ageTable(false), // false ==> not the global age table, no perf data.
  75   _to_space_alloc_buffer(desired_plab_sz_),
  76   _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  77   _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  78   _older_gen_closure(gen_, this),
  79   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
  80                       &_to_space_root_closure, gen_, &_old_gen_root_closure,
  81                       work_queue_set_, &term_),
  82   _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  83   _keep_alive_closure(&_scan_weak_ref_closure),
  84   _strong_roots_time(0.0), _term_time(0.0)
  85 {
  86   #if TASKQUEUE_STATS
  87   _term_attempts = 0;
  88   _overflow_refills = 0;
  89   _overflow_refill_objs = 0;
  90   #endif // TASKQUEUE_STATS
  91 
  92   _survivor_chunk_array =
  93     (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  94   _hash_seed = 17;  // Might want to take time-based random value.
  95   _start = os::elapsedTime();
  // The two old-gen closures were constructed with the young gen above (their
  // ctors take a ParNewGeneration*); retarget them here at the generation
  // they actually scan.
  96   _old_gen_closure.set_generation(old_gen_);
  97   _old_gen_root_closure.set_generation(old_gen_);
  98 }
  99 #ifdef _MSC_VER
 100 #pragma warning( pop )
 101 #endif
 102 


 471 
 472     // Inform old gen that we're done.
 473     _old_gen.par_promote_alloc_done(i);
 474     _old_gen.par_oop_since_save_marks_iterate_done(i);
 475   }
 476 
 477   if (UseConcMarkSweepGC) {
 478     // We need to call this even when ResizeOldPLAB is disabled
 479     // so as to avoid breaking some asserts. While we may be able
 480     // to avoid this by reorganizing the code a bit, I am loathe
 481     // to do that unless we find cases where ergo leads to bad
 482     // performance.
 483     CFLS_LAB::compute_desired_plab_size();
 484   }
 485 }
 486 
// Base closure for scanning oops during a ParNew young collection.
// Only valid for the youngest generation (the assert checks level 0);
// _boundary caches the end of the young gen's reserved space.
 487 ParScanClosure::ParScanClosure(ParNewGeneration* g,
 488                                ParScanThreadState* par_scan_state) :
 489   OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
 490 {
 491   assert(_g->level() == 0, "Optimized for youngest generation");
 492   _boundary = _g->reserved().end();
 493 }
 494 
// All do_oop variants funnel into ParScanClosure::do_oop_work; the two
// boolean arguments distinguish the closure flavors (with/without the GC
// barrier, ordinary scan vs. root scan — presumably matching do_oop_work's
// trailing parameters; confirm against its declaration).
 495 void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
 496 void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
 497 
 498 void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
 499 void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
 500 
 501 void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
 502 void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
 503 
 504 void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
 505 void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
 506 
// Weak-reference scan closure: delegates scanning to the ScanWeakRefClosure
// base, carrying the per-worker state alongside.
 507 ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
 508                                              ParScanThreadState* par_scan_state)
 509   : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
 510 {}
 511 


 556 
 557       //   if successful, goto Start.
 558       continue;
 559 
 560       // try global overflow list.
 561     } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
 562       continue;
 563     }
 564 
 565     // Otherwise, offer termination.
 566     par_scan_state()->start_term_time();
 567     if (terminator()->offer_termination()) break;
 568     par_scan_state()->end_term_time();
 569   }
 570   assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
 571          "Broken overflow list?");
 572   // Finish the last termination pause.
 573   par_scan_state()->end_term_time();
 574 }
 575 
// Gang task that performs the parallel ParNew collection proper; records the
// generations, the young/old address boundary, and the per-worker state set.
 576 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
 577                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 578     AbstractGangTask("ParNewGeneration collection"),
 579     _gen(gen), _old_gen(old_gen),
 580     _young_old_boundary(young_old_boundary),
 581     _state_set(state_set)
 582   {}
 583 
 584 // Reset the terminator for the given number of
 585 // active threads.
// Also resets the per-worker states (passing whether a promotion failure is
// pending) and tells the heap how many threads will offer termination.
 586 void ParNewGenTask::set_for_termination(int active_workers) {
 587   _state_set->reset(active_workers, _gen->promotion_failed());
 588   // Should the heap be passed in?  There's only 1 for now so
 589   // grab it instead.
 590   GenCollectedHeap* gch = GenCollectedHeap::heap();
 591   gch->set_n_termination(active_workers);
 592 }
 593 
// Per-worker entry point: set up this worker's scan state, process strong
// roots (including klasses/CLDs), then evacuate all reachable followers.
 594 void ParNewGenTask::work(uint worker_id) {
 595   GenCollectedHeap* gch = GenCollectedHeap::heap();
 596   // Since this is being done in a separate thread, need new resource
 597   // and handle marks.
 598   ResourceMark rm;
 599   HandleMark hm;
 600 
 601   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 602   assert(_state_set->is_valid(worker_id), "Should not have been called");
 603 
 604   par_scan_state.set_young_old_boundary(_young_old_boundary);
 605 
 606   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 607                                       gch->rem_set()->klass_rem_set());
 608   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 609                                            &par_scan_state.to_space_root_closure(),
 610                                            false);
 611 
 612   par_scan_state.start_strong_roots();
// The generation's level identifies the young gen to the heap's shared
// root-scanning machinery.
 613   gch->gen_process_roots(_gen->level(),
 614                          true,  // Process younger gens, if any,
 615                                 // as strong roots.
 616                          false, // no scope; this is parallel code
 617                          GenCollectedHeap::SO_ScavengeCodeCache,
 618                          GenCollectedHeap::StrongAndWeakRoots,
 619                          &par_scan_state.to_space_root_closure(),
 620                          &par_scan_state.older_gen_closure(),
 621                          &cld_scan_closure);
 622 
 623   par_scan_state.end_strong_roots();
 624 
 625   // "evacuate followers".
 626   par_scan_state.evacuate_followers_closure().do_void();
 627 }
 628 
 629 #ifdef _MSC_VER
 630 #pragma warning( push )
 631 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 632 #endif
 633 ParNewGeneration::
 634 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
 635   : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
 636   _overflow_list(NULL),
 637   _is_alive_closure(this),
 638   _plab_stats(YoungPLABSize, PLABWeight)
 639 {
 640   NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
 641   NOT_PRODUCT(_num_par_pushes = 0;)
 642   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
 643   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 644 
 645   for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
 646     ObjToScanQueue *q = new ObjToScanQueue();
 647     guarantee(q != NULL, "work_queue Allocation failure.");
 648     _task_queues->register_queue(i1, q);
 649   }
 650 
 651   for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
 652     _task_queues->queue(i2)->initialize();
 653 
 654   _overflow_stacks = NULL;
 655   if (ParGCUseLocalOverflow) {


 753 }
 754 
// Both oop widths dispatch to the shared do_oop_work template.
 755 void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
 756 void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
 757 
// Gang-task adapter that runs a reference-processing ProcessTask in parallel,
// handing each GC worker its ParScanThreadState closures (see work() below).
 758 class ParNewRefProcTaskProxy: public AbstractGangTask {
 759   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 760 public:
 761   ParNewRefProcTaskProxy(ProcessTask& task,
 762                          ParNewGeneration& gen,
 763                          Generation& old_gen,
 764                          HeapWord* young_old_boundary,
 765                          ParScanThreadStateSet& state_set);
 766 
 767 private:
 768   virtual void work(uint worker_id);
// Reuse the shared terminator for the given worker count.
 769   virtual void set_for_termination(int active_workers) {
 770     _state_set.terminator()->reset_for_reuse(active_workers);
 771   }
 772 private:
 773   ParNewGeneration&      _gen;
 774   ProcessTask&           _task;
 775   Generation&            _old_gen;
 776   HeapWord*              _young_old_boundary;
 777   ParScanThreadStateSet& _state_set;
 778 };
 779 
// Simple member-wise initialization; no work happens until work() runs.
 780 ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
 781                                                ParNewGeneration& gen,
 782                                                Generation& old_gen,
 783                                                HeapWord* young_old_boundary,
 784                                                ParScanThreadStateSet& state_set)
 785   : AbstractGangTask("ParNewGeneration parallel reference processing"),
 786     _gen(gen),
 787     _task(task),
 788     _old_gen(old_gen),
 789     _young_old_boundary(young_old_boundary),
 790     _state_set(state_set)
 791 {
 792 }
 793 
// Per-worker reference processing: run the wrapped ProcessTask with this
// worker's is-alive / keep-alive / evacuate-followers closures.
 794 void ParNewRefProcTaskProxy::work(uint worker_id)
 795 {
 796   ResourceMark rm;
 797   HandleMark hm;
 798   ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
 799   par_scan_state.set_young_old_boundary(_young_old_boundary);
 800   _task.work(worker_id, par_scan_state.is_alive_closure(),
 801              par_scan_state.keep_alive_closure(),
 802              par_scan_state.evacuate_followers_closure());
 803 }
 804 
 805 class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
 806   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;


 808 
 809 public:
 810   ParNewRefEnqueueTaskProxy(EnqueueTask& task)
 811     : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
 812       _task(task)
 813   { }
 814 
 815   virtual void work(uint worker_id)
 816   {
 817     _task.work(worker_id);
 818   }
 819 };
 820 
 821 
// Run a reference-processing task on the heap's worker gang, resetting the
// per-worker states before and after (0 afterwards poisons stale reuse in
// debug builds).
 822 void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
 823 {
 824   GenCollectedHeap* gch = GenCollectedHeap::heap();
 825   FlexibleWorkGang* workers = gch->workers();
 826   assert(workers != NULL, "Need parallel worker threads.");
 827   _state_set.reset(workers->active_workers(), _generation.promotion_failed());
 828   ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
 829                                  _generation.reserved().end(), _state_set);
 830   workers->run_task(&rp_task);
 831   _state_set.reset(0 /* bad value in debug if not reset */,
 832                    _generation.promotion_failed());
 833 }
 834 
// Run a reference-enqueue task on the worker gang; needs no per-worker
// scan state, so a thin proxy suffices.
 835 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
 836 {
 837   GenCollectedHeap* gch = GenCollectedHeap::heap();
 838   FlexibleWorkGang* workers = gch->workers();
 839   assert(workers != NULL, "Need parallel worker threads.");
 840   ParNewRefEnqueueTaskProxy enq_task(task);
 841   workers->run_task(&enq_task);
 842 }
 843 
// Switch reference processing back to single-threaded mode: flush the
// per-worker states, mark the heap non-parallel, and record save marks.
 844 void ParNewRefProcTaskExecutor::set_single_threaded_mode()
 845 {
 846   _state_set.flush();
 847   GenCollectedHeap* gch = GenCollectedHeap::heap();
 848   gch->set_par_threads(0);  // 0 ==> non-parallel.
 849   gch->save_marks();
 850 }
 851 
// Delegates entirely to ScanClosure; gc_barrier selects barrier application.
 852 ScanClosureWithParBarrier::
 853 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
 854   ScanClosure(g, gc_barrier) {}
 855 
// Captures the heap, the generation level to iterate from, and the two
// closures ("current or non-heap" vs. "older gen") applied in do_void().
 856 EvacuateFollowersClosureGeneral::
 857 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
 858                                 OopsInGenClosure* cur,
 859                                 OopsInGenClosure* older) :
 860   _gch(gch), _level(level),
 861   _scan_cur_or_nonheap(cur), _scan_older(older)
 862 {}
 863 
// Repeatedly scan objects allocated since the save marks until a pass
// completes with no new allocations — i.e. transitive closure is reached.
 864 void EvacuateFollowersClosureGeneral::do_void() {
 865   do {
 866     // Beware: this call will lead to closure applications via virtual
 867     // calls.
 868     _gch->oop_since_save_marks_iterate(_level,
 869                                        _scan_cur_or_nonheap,
 870                                        _scan_older);
 871   } while (!_gch->no_allocs_since_save_marks(_level));
 872 }
 873 
 874 
 875 // A Generation that does parallel young-gen collection.
 876 
 877 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
 878   assert(_promo_failure_scan_stack.is_empty(), "post condition");
 879   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 880 
 881   remove_forwarding_pointers();
 882   if (PrintGCDetails) {
 883     gclog_or_tty->print(" (promotion failed)");
 884   }
 885   // All the spaces are in play for mark-sweep.
 886   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
 887   from()->set_next_compaction_space(to());
 888   gch->set_incremental_collection_failed();
 889   // Inform the next generation that a promotion failure occurred.
 890   _old_gen->promotion_failure_occurred();
 891 


 972   } else {
 973     StrongRootsScope srs;
 974     tsk.work(0);
 975   }
 976   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 977                          promotion_failed());
 978 
 979   // Trace and reset failed promotion info.
 980   if (promotion_failed()) {
 981     thread_state_set.trace_promotion_failed(gc_tracer());
 982   }
 983 
 984   // Process (weak) reference objects found during scavenge.
 985   ReferenceProcessor* rp = ref_processor();
 986   IsAliveClosure is_alive(this);
 987   ScanWeakRefClosure scan_weak_ref(this);
 988   KeepAliveClosure keep_alive(&scan_weak_ref);
 989   ScanClosure               scan_without_gc_barrier(this, false);
 990   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 991   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 992   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
 993     &scan_without_gc_barrier, &scan_with_gc_barrier);
 994   rp->setup_policy(clear_all_soft_refs);
 995   // Can  the mt_degree be set later (at run_task() time would be best)?
 996   rp->set_active_mt_degree(active_workers);
 997   ReferenceProcessorStats stats;
 998   if (rp->processing_is_mt()) {
 999     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1000     stats = rp->process_discovered_references(&is_alive, &keep_alive,
1001                                               &evacuate_followers, &task_executor,
1002                                               _gc_timer, _gc_tracer.gc_id());
1003   } else {
1004     thread_state_set.flush();
1005     gch->set_par_threads(0);  // 0 ==> non-parallel.
1006     gch->save_marks();
1007     stats = rp->process_discovered_references(&is_alive, &keep_alive,
1008                                               &evacuate_followers, NULL,
1009                                               _gc_timer, _gc_tracer.gc_id());
1010   }
1011   _gc_tracer.report_gc_reference_stats(stats);
1012   if (!promotion_failed()) {




  44 #include "memory/resourceArea.hpp"
  45 #include "memory/strongRootsScope.hpp"
  46 #include "memory/space.hpp"
  47 #include "oops/objArrayOop.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "runtime/atomic.inline.hpp"
  50 #include "runtime/handles.hpp"
  51 #include "runtime/handles.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/thread.inline.hpp"
  54 #include "utilities/copy.hpp"
  55 #include "utilities/globalDefinitions.hpp"
  56 #include "utilities/stack.inline.hpp"
  57 #include "utilities/workgroup.hpp"
  58 
  59 #ifdef _MSC_VER
  60 #pragma warning( push )
  61 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  62 #endif
// Per-worker scavenge state for one ParNew collection: binds this worker's
// to-space PLAB, its task queue, an optional per-thread overflow stack, the
// local age table, and the scan/evacuate closures used while copying.
  63 ParScanThreadState::ParScanThreadState(Space* to_space_,
  64                                        ParNewGeneration* young_gen_,
  65                                        Generation* old_gen_,
  66                                        int thread_num_,
  67                                        ObjToScanQueueSet* work_queue_set_,
  68                                        Stack<oop, mtGC>* overflow_stacks_,
  69                                        size_t desired_plab_sz_,
  70                                        ParallelTaskTerminator& term_) :
  71   _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
  72   _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  73   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  74   _ageTable(false), // false ==> not the global age table, no perf data.
  75   _to_space_alloc_buffer(desired_plab_sz_),
  76   _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
  77   _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
  78   _older_gen_closure(young_gen_, this),
  79   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
  80                       &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
  81                       work_queue_set_, &term_),
  82   _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
  83   _keep_alive_closure(&_scan_weak_ref_closure),
  84   _strong_roots_time(0.0), _term_time(0.0)
  85 {
  86   #if TASKQUEUE_STATS
  87   _term_attempts = 0;
  88   _overflow_refills = 0;
  89   _overflow_refill_objs = 0;
  90   #endif // TASKQUEUE_STATS
  91 
  92   _survivor_chunk_array =
  93     (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  94   _hash_seed = 17;  // Might want to take time-based random value.
  95   _start = os::elapsedTime();
  // The two old-gen closures were constructed with the young gen above (their
  // ctors take a ParNewGeneration*); retarget them here at the generation
  // they actually scan.
  96   _old_gen_closure.set_generation(old_gen_);
  97   _old_gen_root_closure.set_generation(old_gen_);
  98 }
  99 #ifdef _MSC_VER
 100 #pragma warning( pop )
 101 #endif
 102 


 471 
 472     // Inform old gen that we're done.
 473     _old_gen.par_promote_alloc_done(i);
 474     _old_gen.par_oop_since_save_marks_iterate_done(i);
 475   }
 476 
 477   if (UseConcMarkSweepGC) {
 478     // We need to call this even when ResizeOldPLAB is disabled
 479     // so as to avoid breaking some asserts. While we may be able
 480     // to avoid this by reorganizing the code a bit, I am loathe
 481     // to do that unless we find cases where ergo leads to bad
 482     // performance.
 483     CFLS_LAB::compute_desired_plab_size();
 484   }
 485 }
 486 
// Base closure for scanning oops during a ParNew young collection;
// _boundary caches the end of the young gen's reserved space.
 487 ParScanClosure::ParScanClosure(ParNewGeneration* g,
 488                                ParScanThreadState* par_scan_state) :
 489   OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
 490 {

 491   _boundary = _g->reserved().end();
 492 }
 493 
// All do_oop variants funnel into ParScanClosure::do_oop_work; the two
// boolean arguments distinguish the closure flavors (with/without the GC
// barrier, ordinary scan vs. root scan — presumably matching do_oop_work's
// trailing parameters; confirm against its declaration).
 494 void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
 495 void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
 496 
 497 void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
 498 void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
 499 
 500 void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
 501 void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
 502 
 503 void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
 504 void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
 505 
// Weak-reference scan closure: delegates scanning to the ScanWeakRefClosure
// base, carrying the per-worker state alongside.
 506 ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
 507                                              ParScanThreadState* par_scan_state)
 508   : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
 509 {}
 510 


 555 
 556       //   if successful, goto Start.
 557       continue;
 558 
 559       // try global overflow list.
 560     } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
 561       continue;
 562     }
 563 
 564     // Otherwise, offer termination.
 565     par_scan_state()->start_term_time();
 566     if (terminator()->offer_termination()) break;
 567     par_scan_state()->end_term_time();
 568   }
 569   assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
 570          "Broken overflow list?");
 571   // Finish the last termination pause.
 572   par_scan_state()->end_term_time();
 573 }
 574 
// Gang task that performs the parallel ParNew collection proper; records the
// generations, the young/old address boundary, and the per-worker state set.
 575 ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
 576                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 577     AbstractGangTask("ParNewGeneration collection"),
 578     _young_gen(young_gen), _old_gen(old_gen),
 579     _young_old_boundary(young_old_boundary),
 580     _state_set(state_set)
 581   {}
 582 
 583 // Reset the terminator for the given number of
 584 // active threads.
// Also resets the per-worker states (passing whether a promotion failure is
// pending) and tells the heap how many threads will offer termination.
 585 void ParNewGenTask::set_for_termination(int active_workers) {
 586   _state_set->reset(active_workers, _young_gen->promotion_failed());
 587   // Should the heap be passed in?  There's only 1 for now so
 588   // grab it instead.
 589   GenCollectedHeap* gch = GenCollectedHeap::heap();
 590   gch->set_n_termination(active_workers);
 591 }
 592 
// Per-worker entry point: set up this worker's scan state, process strong
// roots (including klasses/CLDs), then evacuate all reachable followers.
 593 void ParNewGenTask::work(uint worker_id) {
 594   GenCollectedHeap* gch = GenCollectedHeap::heap();
 595   // Since this is being done in a separate thread, need new resource
 596   // and handle marks.
 597   ResourceMark rm;
 598   HandleMark hm;
 599 
 600   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 601   assert(_state_set->is_valid(worker_id), "Should not have been called");
 602 
 603   par_scan_state.set_young_old_boundary(_young_old_boundary);
 604 
 605   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 606                                       gch->rem_set()->klass_rem_set());
 607   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 608                                            &par_scan_state.to_space_root_closure(),
 609                                            false);
 610 
 611   par_scan_state.start_strong_roots();
// Generation::Young replaces the old numeric level parameter (JDK-8077842).
 612   gch->gen_process_roots(Generation::Young,
 613                          true,  // Process younger gens, if any,
 614                                 // as strong roots.
 615                          false, // no scope; this is parallel code
 616                          GenCollectedHeap::SO_ScavengeCodeCache,
 617                          GenCollectedHeap::StrongAndWeakRoots,
 618                          &par_scan_state.to_space_root_closure(),
 619                          &par_scan_state.older_gen_closure(),
 620                          &cld_scan_closure);
 621 
 622   par_scan_state.end_strong_roots();
 623 
 624   // "evacuate followers".
 625   par_scan_state.evacuate_followers_closure().do_void();
 626 }
 627 
 628 #ifdef _MSC_VER
 629 #pragma warning( push )
 630 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 631 #endif
 632 ParNewGeneration::
 633 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
 634   : DefNewGeneration(rs, initial_byte_size, "PCopy"),
 635   _overflow_list(NULL),
 636   _is_alive_closure(this),
 637   _plab_stats(YoungPLABSize, PLABWeight)
 638 {
 639   NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
 640   NOT_PRODUCT(_num_par_pushes = 0;)
 641   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
 642   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 643 
 644   for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
 645     ObjToScanQueue *q = new ObjToScanQueue();
 646     guarantee(q != NULL, "work_queue Allocation failure.");
 647     _task_queues->register_queue(i1, q);
 648   }
 649 
 650   for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
 651     _task_queues->queue(i2)->initialize();
 652 
 653   _overflow_stacks = NULL;
 654   if (ParGCUseLocalOverflow) {


 752 }
 753 
// Both oop widths dispatch to the shared do_oop_work template.
 754 void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
 755 void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
 756 
 757 class ParNewRefProcTaskProxy: public AbstractGangTask {
 758   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 759 public:
 760   ParNewRefProcTaskProxy(ProcessTask& task,
 761                          ParNewGeneration& gen,
 762                          Generation& old_gen,
 763                          HeapWord* young_old_boundary,
 764                          ParScanThreadStateSet& state_set);
 765 
 766 private:
 767   virtual void work(uint worker_id);
 768   virtual void set_for_termination(int active_workers) {
 769     _state_set.terminator()->reset_for_reuse(active_workers);
 770   }
 771 private:
 772   ParNewGeneration&      _young_gen;
 773   ProcessTask&           _task;
 774   Generation&            _old_gen;
 775   HeapWord*              _young_old_boundary;
 776   ParScanThreadStateSet& _state_set;
 777 };
 778 
// Simple member-wise initialization; no work happens until work() runs.
 779 ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
 780                                                ParNewGeneration& young_gen,
 781                                                Generation& old_gen,
 782                                                HeapWord* young_old_boundary,
 783                                                ParScanThreadStateSet& state_set)
 784   : AbstractGangTask("ParNewGeneration parallel reference processing"),
 785     _young_gen(young_gen),
 786     _task(task),
 787     _old_gen(old_gen),
 788     _young_old_boundary(young_old_boundary),
 789     _state_set(state_set)
 790 {
 791 }
 792 
// Per-worker reference processing: run the wrapped ProcessTask with this
// worker's is-alive / keep-alive / evacuate-followers closures.
 793 void ParNewRefProcTaskProxy::work(uint worker_id)
 794 {
 795   ResourceMark rm;
 796   HandleMark hm;
 797   ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
 798   par_scan_state.set_young_old_boundary(_young_old_boundary);
 799   _task.work(worker_id, par_scan_state.is_alive_closure(),
 800              par_scan_state.keep_alive_closure(),
 801              par_scan_state.evacuate_followers_closure());
 802 }
 803 
 804 class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
 805   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;


 807 
 808 public:
 809   ParNewRefEnqueueTaskProxy(EnqueueTask& task)
 810     : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
 811       _task(task)
 812   { }
 813 
 814   virtual void work(uint worker_id)
 815   {
 816     _task.work(worker_id);
 817   }
 818 };
 819 
 820 
// Run a reference-processing task on the heap's worker gang, resetting the
// per-worker states before and after (0 afterwards poisons stale reuse in
// debug builds). The old gen now comes from the heap directly rather than
// from a generation-level lookup (JDK-8077842).
 821 void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
 822 {
 823   GenCollectedHeap* gch = GenCollectedHeap::heap();
 824   FlexibleWorkGang* workers = gch->workers();
 825   assert(workers != NULL, "Need parallel worker threads.");
 826   _state_set.reset(workers->active_workers(), _generation.promotion_failed());
 827   ParNewRefProcTaskProxy rp_task(task, _generation, *(gch->old_gen()),
 828                                  _generation.reserved().end(), _state_set);
 829   workers->run_task(&rp_task);
 830   _state_set.reset(0 /* bad value in debug if not reset */,
 831                    _generation.promotion_failed());
 832 }
 833 
// Run a reference-enqueue task on the worker gang; needs no per-worker
// scan state, so a thin proxy suffices.
 834 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
 835 {
 836   GenCollectedHeap* gch = GenCollectedHeap::heap();
 837   FlexibleWorkGang* workers = gch->workers();
 838   assert(workers != NULL, "Need parallel worker threads.");
 839   ParNewRefEnqueueTaskProxy enq_task(task);
 840   workers->run_task(&enq_task);
 841 }
 842 
// Switch reference processing back to single-threaded mode: flush the
// per-worker states, mark the heap non-parallel, and record save marks.
 843 void ParNewRefProcTaskExecutor::set_single_threaded_mode()
 844 {
 845   _state_set.flush();
 846   GenCollectedHeap* gch = GenCollectedHeap::heap();
 847   gch->set_par_threads(0);  // 0 ==> non-parallel.
 848   gch->save_marks();
 849 }
 850 
// Delegates entirely to ScanClosure; gc_barrier selects barrier application.
 851 ScanClosureWithParBarrier::
 852 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
 853   ScanClosure(g, gc_barrier) {}
 854 
// Captures the heap and the two closures ("current or non-heap" vs. "older
// gen") applied in do_void(); the level parameter is gone (JDK-8077842).
 855 EvacuateFollowersClosureGeneral::
 856 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
 857                                 OopsInGenClosure* cur,
 858                                 OopsInGenClosure* older) :
 859   _gch(gch),
 860   _scan_cur_or_nonheap(cur), _scan_older(older)
 861 {}
 862 
// Repeatedly scan objects allocated since the save marks until a pass
// completes with no new allocations — i.e. transitive closure is reached.
// Starts from the young generation and includes it in the no-allocs check.
 863 void EvacuateFollowersClosureGeneral::do_void() {
 864   do {
 865     // Beware: this call will lead to closure applications via virtual
 866     // calls.
 867     _gch->oop_since_save_marks_iterate(Generation::Young,
 868                                        _scan_cur_or_nonheap,
 869                                        _scan_older);
 870   } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
 871 }
 872 
 873 
 874 // A Generation that does parallel young-gen collection.
 875 
 876 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
 877   assert(_promo_failure_scan_stack.is_empty(), "post condition");
 878   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 879 
 880   remove_forwarding_pointers();
 881   if (PrintGCDetails) {
 882     gclog_or_tty->print(" (promotion failed)");
 883   }
 884   // All the spaces are in play for mark-sweep.
 885   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
 886   from()->set_next_compaction_space(to());
 887   gch->set_incremental_collection_failed();
 888   // Inform the next generation that a promotion failure occurred.
 889   _old_gen->promotion_failure_occurred();
 890 


 971   } else {
 972     StrongRootsScope srs;
 973     tsk.work(0);
 974   }
 975   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 976                          promotion_failed());
 977 
 978   // Trace and reset failed promotion info.
 979   if (promotion_failed()) {
 980     thread_state_set.trace_promotion_failed(gc_tracer());
 981   }
 982 
 983   // Process (weak) reference objects found during scavenge.
 984   ReferenceProcessor* rp = ref_processor();
 985   IsAliveClosure is_alive(this);
 986   ScanWeakRefClosure scan_weak_ref(this);
 987   KeepAliveClosure keep_alive(&scan_weak_ref);
 988   ScanClosure               scan_without_gc_barrier(this, false);
 989   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 990   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 991   EvacuateFollowersClosureGeneral evacuate_followers(gch,
 992     &scan_without_gc_barrier, &scan_with_gc_barrier);
 993   rp->setup_policy(clear_all_soft_refs);
 994   // Can  the mt_degree be set later (at run_task() time would be best)?
 995   rp->set_active_mt_degree(active_workers);
 996   ReferenceProcessorStats stats;
 997   if (rp->processing_is_mt()) {
 998     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
 999     stats = rp->process_discovered_references(&is_alive, &keep_alive,
1000                                               &evacuate_followers, &task_executor,
1001                                               _gc_timer, _gc_tracer.gc_id());
1002   } else {
1003     thread_state_set.flush();
1004     gch->set_par_threads(0);  // 0 ==> non-parallel.
1005     gch->save_marks();
1006     stats = rp->process_discovered_references(&is_alive, &keep_alive,
1007                                               &evacuate_followers, NULL,
1008                                               _gc_timer, _gc_tracer.gc_id());
1009   }
1010   _gc_tracer.report_gc_reference_stats(stats);
1011   if (!promotion_failed()) {


< prev index next >