  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

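// Run the reference-enqueue phase across the heap's worker gang: the
// EnqueueTask is wrapped in a gang-task proxy and each worker enqueues
// its share of the discovered references.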
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

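// Called by the reference processor when it falls back to single-threaded
// processing: flush the per-thread work state, mark the heap as non-parallel,
// and record the current marks so later "since save marks" iterations only
// visit newly copied objects.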
void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0); // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

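// Evacuates "followers": repeatedly applies the given closures to all objects
// copied since the last save_marks() until an iteration copies nothing new
// (see do_void() below).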
EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

// ... (intervening code elided; the excerpt resumes in the middle of
// ParNewGeneration::collect()) ...

  to()->clear(SpaceDecorator::Mangle);

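  // Record the current top of each generation; objects copied past these
  // marks are the "followers" that the evacuate-followers closure will scan.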
  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  uint n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

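  // Scan roots and evacuate reachable young objects in parallel; the
  // StrongRootsScope brackets the root scan for n_workers threads.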
  {
    StrongRootsScope srs(n_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->set_par_threads(n_workers);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times. We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    if (n_workers > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

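  // Process (weak) reference objects found during scavenge.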
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
                                                     &scan_without_gc_barrier, &scan_with_gc_barrier);
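  // setup_policy() selects the SoftReference clearing policy for this cycle:
  // clear_all_soft_refs forces eager clearing, otherwise the default
  // timestamp-based policy is used.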
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, _gc_tracer.gc_id());
  } else {
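    // Serial reference processing: mirror what
    // ParNewRefProcTaskExecutor::set_single_threaded_mode() does on the
    // parallel path before handing the references to the processor.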
    thread_state_set.flush();
    gch->set_par_threads(0); // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, _gc_tracer.gc_id());
  }
  _gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }