      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
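  // Every worker has now agreed to terminate, so no thread can still be
  // pushing entries: the global overflow list must be empty.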
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
                             HeapWord* young_old_boundary,
                             ParScanThreadStateSet* state_set)
  : AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(uint active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in? There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

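  // Closures for the klass and class-loader-data roots; oops found there
  // are scavenged with the same to-space closure as the other strong roots.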
  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_gen->level(),
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         false, // no scope; this is parallel code
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  // ... (intervening lines elided) ...
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
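  // Flush the per-thread state accumulated by the parallel workers back
  // into the generation before reverting to single-threaded processing.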
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0); // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
    // ... (intervening lines elided) ...
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  uint n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor.
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
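  // One ParScanThreadState per worker: each holds that worker's work queue,
  // to-space allocation buffer and per-thread statistics for this collection.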
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times. We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    StrongRootsScope srs;
    workers->run_task(&tsk);
  } else {
    StrongRootsScope srs;
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
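  // Two variants of the scan closure: followers reached through the young
  // generation need no card-table update, while references scanned in the
  // old generation go through the parallel write barrier.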
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
                                                     &scan_without_gc_barrier,
                                                     &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, _gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0); // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, _gc_tracer.gc_id());
  }
  _gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling, which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    // ... (intervening lines elided) ...

// ===========================================================================
// New version of the same section:
// ===========================================================================

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
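  // Every worker has now agreed to terminate, so no thread can still be
  // pushing entries: the global overflow list must be empty.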
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
                             HeapWord* young_old_boundary,
                             ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope)
  : AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set),
    _strong_roots_scope(strong_roots_scope)
{}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(uint active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

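  // Closures for the klass and class-loader-data roots; oops found there
  // are scavenged with the same to-space closure as the other strong roots.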
  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_strong_roots_scope,
                         _gen->level(),
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  // ... (intervening lines elided) ...
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
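  // Flush the per-thread state accumulated by the parallel workers back
  // into the generation before reverting to single-threaded processing.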
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
    // ... (intervening lines elided) ...
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  uint n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor.
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
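  // One ParScanThreadState per worker: each holds that worker's work queue,
  // to-space allocation buffer and per-thread statistics for this collection.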
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  {
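    // Create one StrongRootsScope up front and hand it to the task, so all
    // workers share the same scope; its thread count lets the root-processing
    // code coordinate the parallel claiming of root groups.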
    StrongRootsScope srs(n_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times. We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    if (n_workers > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
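  // Two variants of the scan closure: followers reached through the young
  // generation need no card-table update, while references scanned in the
  // old generation go through the parallel write barrier.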
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
                                                     &scan_without_gc_barrier,
                                                     &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, _gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, _gc_tracer.gc_id());
  }
  _gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling, which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    // ... (remainder elided) ...