45 #include "memory/strongRootsScope.hpp"
46 #include "memory/space.hpp"
47 #include "oops/objArrayOop.hpp"
48 #include "oops/oop.inline.hpp"
49 #include "runtime/atomic.inline.hpp"
50 #include "runtime/handles.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/thread.inline.hpp"
54 #include "utilities/copy.hpp"
55 #include "utilities/globalDefinitions.hpp"
56 #include "utilities/stack.inline.hpp"
57 #include "utilities/taskqueue.inline.hpp"
58 #include "utilities/workgroup.hpp"
59
60 #ifdef _MSC_VER
61 #pragma warning( push )
62 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
63 #endif
// Per-worker-thread state for a parallel young-gen (ParNew) scavenge.
// Wires this worker's task queue, closures and PLAB to the generations
// involved in the collection. One instance exists per GC worker thread.
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  // Thread-local overflow stack, only when per-thread overflow is in use.
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17; // Might want to take time-based random value.
  _start = os::elapsedTime();
  // The old-gen closures were constructed against the young gen above;
  // repoint them at the old generation they actually scan into.
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
100 #ifdef _MSC_VER
101 #pragma warning( pop )
102 #endif
103
464
465 // Inform old gen that we're done.
466 _old_gen.par_promote_alloc_done(i);
467 _old_gen.par_oop_since_save_marks_iterate_done(i);
468 }
469
470 if (UseConcMarkSweepGC) {
471 // We need to call this even when ResizeOldPLAB is disabled
472 // so as to avoid breaking some asserts. While we may be able
473 // to avoid this by reorganizing the code a bit, I am loathe
474 // to do that unless we find cases where ergo leads to bad
475 // performance.
476 CFLS_LAB::compute_desired_plab_size();
477 }
478 }
479
// Base scan closure for the parallel young-gen collection; remembers the
// per-worker scan state so do_oop_work can push work onto its queue.
ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  // End of the young generation's reserved space.
  _boundary = _g->reserved().end();
}
487
// oop / narrowOop entry points for the four scan-closure flavors; all
// funnel into ParScanClosure::do_oop_work. Judging by the class names,
// the two booleans select (gc_barrier, root_scan) — confirm against
// do_oop_work's signature.
void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
499
// Weak-reference scan closure for the parallel collection; keeps the
// per-worker state alongside the base ScanWeakRefClosure bookkeeping.
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}
504
549
550 // if successful, goto Start.
551 continue;
552
553 // try global overflow list.
554 } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
555 continue;
556 }
557
558 // Otherwise, offer termination.
559 par_scan_state()->start_term_time();
560 if (terminator()->offer_termination()) break;
561 par_scan_state()->end_term_time();
562 }
563 assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
564 "Broken overflow list?");
565 // Finish the last termination pause.
566 par_scan_state()->end_term_time();
567 }
568
// Gang task performing one parallel ParNew young-generation collection.
// All pointer arguments are caller-owned and must outlive the task.
ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
  AbstractGangTask("ParNewGeneration collection"),
  _gen(gen), _old_gen(old_gen),
  _young_old_boundary(young_old_boundary),
  _state_set(state_set)
{}
576
577 // Reset the terminator for the given number of
578 // active threads.
579 void ParNewGenTask::set_for_termination(uint active_workers) {
580 _state_set->reset(active_workers, _gen->promotion_failed());
581 // Should the heap be passed in? There's only 1 for now so
582 // grab it instead.
583 GenCollectedHeap* gch = GenCollectedHeap::heap();
584 gch->set_n_termination(active_workers);
585 }
586
587 void ParNewGenTask::work(uint worker_id) {
588 GenCollectedHeap* gch = GenCollectedHeap::heap();
589 // Since this is being done in a separate thread, need new resource
590 // and handle marks.
591 ResourceMark rm;
592 HandleMark hm;
593
594 ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
595 assert(_state_set->is_valid(worker_id), "Should not have been called");
596
597 par_scan_state.set_young_old_boundary(_young_old_boundary);
598
599 KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
600 gch->rem_set()->klass_rem_set());
601 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
602 &par_scan_state.to_space_root_closure(),
603 false);
604
605 par_scan_state.start_strong_roots();
606 gch->gen_process_roots(_gen->level(),
607 true, // Process younger gens, if any,
608 // as strong roots.
609 false, // no scope; this is parallel code
610 GenCollectedHeap::SO_ScavengeCodeCache,
611 GenCollectedHeap::StrongAndWeakRoots,
612 &par_scan_state.to_space_root_closure(),
613 &par_scan_state.older_gen_closure(),
614 &cld_scan_closure);
615
616 par_scan_state.end_strong_roots();
617
618 // "evacuate followers".
619 par_scan_state.evacuate_followers_closure().do_void();
620 }
621
622 #ifdef _MSC_VER
623 #pragma warning( push )
624 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
625 #endif
626 ParNewGeneration::
627 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
628 : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
629 _overflow_list(NULL),
630 _is_alive_closure(this),
631 _plab_stats(YoungPLABSize, PLABWeight)
632 {
633 NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
634 NOT_PRODUCT(_num_par_pushes = 0;)
635 _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
636 guarantee(_task_queues != NULL, "task_queues allocation failure.");
637
638 for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
639 ObjToScanQueue *q = new ObjToScanQueue();
640 guarantee(q != NULL, "work_queue Allocation failure.");
641 _task_queues->register_queue(i1, q);
642 }
643
644 for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
645 _task_queues->queue(i2)->initialize();
646
647 _overflow_stacks = NULL;
648 if (ParGCUseLocalOverflow) {
746 }
747
// oop / narrowOop entry points; both funnel into do_oop_work.
void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
750
// Gang task that adapts a reference-processing ProcessTask so it can be
// run by the GC worker gang, handing each worker its thread-local
// ParScanThreadState closures.
class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  // Reset the shared terminator for the number of workers about to run.
  virtual void set_for_termination(uint active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  // All references are to caller-owned objects outliving this proxy.
  ParNewGeneration& _gen;
  ProcessTask& _task;
  Generation& _old_gen;
  HeapWord* _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};
772
// Record the task and the collection context for later use in work().
ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}
786
787 void ParNewRefProcTaskProxy::work(uint worker_id)
788 {
789 ResourceMark rm;
790 HandleMark hm;
791 ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
792 par_scan_state.set_young_old_boundary(_young_old_boundary);
793 _task.work(worker_id, par_scan_state.is_alive_closure(),
794 par_scan_state.keep_alive_closure(),
795 par_scan_state.evacuate_followers_closure());
796 }
797
// Gang task that adapts a reference-enqueue task for execution by the
// GC worker gang; each worker simply runs its share of the task.
class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;  // caller-owned; must outlive this proxy

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};
813
814
815 void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
816 {
817 GenCollectedHeap* gch = GenCollectedHeap::heap();
818 FlexibleWorkGang* workers = gch->workers();
819 assert(workers != NULL, "Need parallel worker threads.");
820 _state_set.reset(workers->active_workers(), _generation.promotion_failed());
821 ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
822 _generation.reserved().end(), _state_set);
823 workers->run_task(&rp_task);
824 _state_set.reset(0 /* bad value in debug if not reset */,
825 _generation.promotion_failed());
826 }
827
828 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
829 {
830 GenCollectedHeap* gch = GenCollectedHeap::heap();
831 FlexibleWorkGang* workers = gch->workers();
832 assert(workers != NULL, "Need parallel worker threads.");
833 ParNewRefEnqueueTaskProxy enq_task(task);
834 workers->run_task(&enq_task);
835 }
836
837 void ParNewRefProcTaskExecutor::set_single_threaded_mode()
838 {
839 _state_set.flush();
840 GenCollectedHeap* gch = GenCollectedHeap::heap();
841 gch->set_par_threads(0); // 0 ==> non-parallel.
842 gch->save_marks();
843 }
844
// Presumably a ScanClosure variant whose barrier work is safe under
// parallel execution (see class name); construction just forwards.
ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}
848
// Closure that repeatedly applies the given scan closures to objects
// allocated since the last save-marks (see do_void).
EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}
856
857 void EvacuateFollowersClosureGeneral::do_void() {
858 do {
859 // Beware: this call will lead to closure applications via virtual
860 // calls.
861 _gch->oop_since_save_marks_iterate(_level,
862 _scan_cur_or_nonheap,
863 _scan_older);
864 } while (!_gch->no_allocs_since_save_marks(_level));
865 }
866
867
868 // A Generation that does parallel young-gen collection.
869
870 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
871 assert(_promo_failure_scan_stack.is_empty(), "post condition");
872 _promo_failure_scan_stack.clear(true); // Clear cached segments.
873
874 remove_forwarding_pointers();
875 if (PrintGCDetails) {
876 gclog_or_tty->print(" (promotion failed)");
877 }
878 // All the spaces are in play for mark-sweep.
879 swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
880 from()->set_next_compaction_space(to());
881 gch->set_incremental_collection_failed();
882 // Inform the next generation that a promotion failure occurred.
883 _old_gen->promotion_failure_occurred();
884
965 } else {
966 StrongRootsScope srs;
967 tsk.work(0);
968 }
969 thread_state_set.reset(0 /* Bad value in debug if not reset */,
970 promotion_failed());
971
972 // Trace and reset failed promotion info.
973 if (promotion_failed()) {
974 thread_state_set.trace_promotion_failed(gc_tracer());
975 }
976
977 // Process (weak) reference objects found during scavenge.
978 ReferenceProcessor* rp = ref_processor();
979 IsAliveClosure is_alive(this);
980 ScanWeakRefClosure scan_weak_ref(this);
981 KeepAliveClosure keep_alive(&scan_weak_ref);
982 ScanClosure scan_without_gc_barrier(this, false);
983 ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
984 set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
985 EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
986 &scan_without_gc_barrier, &scan_with_gc_barrier);
987 rp->setup_policy(clear_all_soft_refs);
988 // Can the mt_degree be set later (at run_task() time would be best)?
989 rp->set_active_mt_degree(active_workers);
990 ReferenceProcessorStats stats;
991 if (rp->processing_is_mt()) {
992 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
993 stats = rp->process_discovered_references(&is_alive, &keep_alive,
994 &evacuate_followers, &task_executor,
995 _gc_timer, _gc_tracer.gc_id());
996 } else {
997 thread_state_set.flush();
998 gch->set_par_threads(0); // 0 ==> non-parallel.
999 gch->save_marks();
1000 stats = rp->process_discovered_references(&is_alive, &keep_alive,
1001 &evacuate_followers, NULL,
1002 _gc_timer, _gc_tracer.gc_id());
1003 }
1004 _gc_tracer.report_gc_reference_stats(stats);
1005 if (!promotion_failed()) {
1006 // Swap the survivor spaces.
1007 eden()->clear(SpaceDecorator::Mangle);
1008 from()->clear(SpaceDecorator::Mangle);
1009 if (ZapUnusedHeapArea) {
1010 // This is now done here because of the piece-meal mangling which
1011 // can check for valid mangling at intermediate points in the
1012 // collection(s). When a minor collection fails to collect
1039 if (PrintGC && !PrintGCDetails) {
1040 gch->print_heap_change(gch_prev_used);
1041 }
1042
1043 TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
1044 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());
1045
1046 if (UseAdaptiveSizePolicy) {
1047 size_policy->minor_collection_end(gch->gc_cause());
1048 size_policy->avg_survived()->sample(from()->used());
1049 }
1050
1051 // We need to use a monotonically non-decreasing time in ms
1052 // or we will see time-warp warnings and os::javaTimeMillis()
1053 // does not guarantee monotonicity.
1054 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1055 update_time_of_last_gc(now);
1056
1057 rp->set_enqueuing_is_done(true);
1058 if (rp->processing_is_mt()) {
1059 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1060 rp->enqueue_discovered_references(&task_executor);
1061 } else {
1062 rp->enqueue_discovered_references(NULL);
1063 }
1064 rp->verify_no_references_recorded();
1065
1066 gch->trace_heap_after_gc(gc_tracer());
1067 _gc_tracer.report_tenuring_threshold(tenuring_threshold());
1068
1069 _gc_timer->register_gc_end();
1070
1071 _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1072 }
1073
1074 static int sum;
1075 void ParNewGeneration::waste_some_time() {
1076 for (int i = 0; i < 100; i++) {
1077 sum += i;
1078 }
1079 }
|
45 #include "memory/strongRootsScope.hpp"
46 #include "memory/space.hpp"
47 #include "oops/objArrayOop.hpp"
48 #include "oops/oop.inline.hpp"
49 #include "runtime/atomic.inline.hpp"
50 #include "runtime/handles.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/thread.inline.hpp"
54 #include "utilities/copy.hpp"
55 #include "utilities/globalDefinitions.hpp"
56 #include "utilities/stack.inline.hpp"
57 #include "utilities/taskqueue.inline.hpp"
58 #include "utilities/workgroup.hpp"
59
60 #ifdef _MSC_VER
61 #pragma warning( push )
62 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
63 #endif
// Per-worker-thread state for a parallel young-gen (ParNew) scavenge.
// Wires this worker's task queue, closures and PLAB to the generations
// involved in the collection. One instance exists per GC worker thread.
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* young_gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  // Thread-local overflow stack, only when per-thread overflow is in use.
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
  _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
  _older_gen_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17; // Might want to take time-based random value.
  _start = os::elapsedTime();
  // The old-gen closures were constructed against the young gen above;
  // repoint them at the old generation they actually scan into.
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
100 #ifdef _MSC_VER
101 #pragma warning( pop )
102 #endif
103
464
465 // Inform old gen that we're done.
466 _old_gen.par_promote_alloc_done(i);
467 _old_gen.par_oop_since_save_marks_iterate_done(i);
468 }
469
470 if (UseConcMarkSweepGC) {
471 // We need to call this even when ResizeOldPLAB is disabled
472 // so as to avoid breaking some asserts. While we may be able
473 // to avoid this by reorganizing the code a bit, I am loathe
474 // to do that unless we find cases where ergo leads to bad
475 // performance.
476 CFLS_LAB::compute_desired_plab_size();
477 }
478 }
479
// Base scan closure for the parallel young-gen collection; remembers the
// per-worker scan state so do_oop_work can push work onto its queue.
ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  // End of the young generation's reserved space.
  _boundary = _g->reserved().end();
}
486
// oop / narrowOop entry points for the four scan-closure flavors; all
// funnel into ParScanClosure::do_oop_work. Judging by the class names,
// the two booleans select (gc_barrier, root_scan) — confirm against
// do_oop_work's signature.
void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
498
// Weak-reference scan closure for the parallel collection; keeps the
// per-worker state alongside the base ScanWeakRefClosure bookkeeping.
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}
503
548
549 // if successful, goto Start.
550 continue;
551
552 // try global overflow list.
553 } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
554 continue;
555 }
556
557 // Otherwise, offer termination.
558 par_scan_state()->start_term_time();
559 if (terminator()->offer_termination()) break;
560 par_scan_state()->end_term_time();
561 }
562 assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
563 "Broken overflow list?");
564 // Finish the last termination pause.
565 par_scan_state()->end_term_time();
566 }
567
// Gang task performing one parallel ParNew young-generation collection.
// All pointer arguments are caller-owned and must outlive the task.
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
  AbstractGangTask("ParNewGeneration collection"),
  _young_gen(young_gen), _old_gen(old_gen),
  _young_old_boundary(young_old_boundary),
  _state_set(state_set)
{}
575
576 // Reset the terminator for the given number of
577 // active threads.
578 void ParNewGenTask::set_for_termination(uint active_workers) {
579 _state_set->reset(active_workers, _young_gen->promotion_failed());
580 // Should the heap be passed in? There's only 1 for now so
581 // grab it instead.
582 GenCollectedHeap* gch = GenCollectedHeap::heap();
583 gch->set_n_termination(active_workers);
584 }
585
586 void ParNewGenTask::work(uint worker_id) {
587 GenCollectedHeap* gch = GenCollectedHeap::heap();
588 // Since this is being done in a separate thread, need new resource
589 // and handle marks.
590 ResourceMark rm;
591 HandleMark hm;
592
593 ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
594 assert(_state_set->is_valid(worker_id), "Should not have been called");
595
596 par_scan_state.set_young_old_boundary(_young_old_boundary);
597
598 KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
599 gch->rem_set()->klass_rem_set());
600 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
601 &par_scan_state.to_space_root_closure(),
602 false);
603
604 par_scan_state.start_strong_roots();
605 gch->gen_process_roots(Generation::Young,
606 true, // Process younger gens, if any,
607 // as strong roots.
608 false, // no scope; this is parallel code
609 GenCollectedHeap::SO_ScavengeCodeCache,
610 GenCollectedHeap::StrongAndWeakRoots,
611 &par_scan_state.to_space_root_closure(),
612 &par_scan_state.older_gen_closure(),
613 &cld_scan_closure);
614
615 par_scan_state.end_strong_roots();
616
617 // "evacuate followers".
618 par_scan_state.evacuate_followers_closure().do_void();
619 }
620
621 #ifdef _MSC_VER
622 #pragma warning( push )
623 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
624 #endif
625 ParNewGeneration::
626 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
627 : DefNewGeneration(rs, initial_byte_size, "PCopy"),
628 _overflow_list(NULL),
629 _is_alive_closure(this),
630 _plab_stats(YoungPLABSize, PLABWeight)
631 {
632 NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
633 NOT_PRODUCT(_num_par_pushes = 0;)
634 _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
635 guarantee(_task_queues != NULL, "task_queues allocation failure.");
636
637 for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
638 ObjToScanQueue *q = new ObjToScanQueue();
639 guarantee(q != NULL, "work_queue Allocation failure.");
640 _task_queues->register_queue(i1, q);
641 }
642
643 for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
644 _task_queues->queue(i2)->initialize();
645
646 _overflow_stacks = NULL;
647 if (ParGCUseLocalOverflow) {
745 }
746
// oop / narrowOop entry points; both funnel into do_oop_work.
void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
749
// Gang task that adapts a reference-processing ProcessTask so it can be
// run by the GC worker gang, handing each worker its thread-local
// ParScanThreadState closures.
class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  // Reset the shared terminator for the number of workers about to run.
  virtual void set_for_termination(uint active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  // All references are to caller-owned objects outliving this proxy.
  ParNewGeneration& _young_gen;
  ProcessTask& _task;
  Generation& _old_gen;
  HeapWord* _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};
771
// Record the task and the collection context for later use in work().
ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& young_gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _young_gen(young_gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}
785
786 void ParNewRefProcTaskProxy::work(uint worker_id)
787 {
788 ResourceMark rm;
789 HandleMark hm;
790 ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
791 par_scan_state.set_young_old_boundary(_young_old_boundary);
792 _task.work(worker_id, par_scan_state.is_alive_closure(),
793 par_scan_state.keep_alive_closure(),
794 par_scan_state.evacuate_followers_closure());
795 }
796
// Gang task that adapts a reference-enqueue task for execution by the
// GC worker gang; each worker simply runs its share of the task.
class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;  // caller-owned; must outlive this proxy

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};
812
813
814 void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
815 {
816 GenCollectedHeap* gch = GenCollectedHeap::heap();
817 FlexibleWorkGang* workers = gch->workers();
818 assert(workers != NULL, "Need parallel worker threads.");
819 _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
820 ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
821 _young_gen.reserved().end(), _state_set);
822 workers->run_task(&rp_task);
823 _state_set.reset(0 /* bad value in debug if not reset */,
824 _young_gen.promotion_failed());
825 }
826
827 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
828 {
829 GenCollectedHeap* gch = GenCollectedHeap::heap();
830 FlexibleWorkGang* workers = gch->workers();
831 assert(workers != NULL, "Need parallel worker threads.");
832 ParNewRefEnqueueTaskProxy enq_task(task);
833 workers->run_task(&enq_task);
834 }
835
836 void ParNewRefProcTaskExecutor::set_single_threaded_mode()
837 {
838 _state_set.flush();
839 GenCollectedHeap* gch = GenCollectedHeap::heap();
840 gch->set_par_threads(0); // 0 ==> non-parallel.
841 gch->save_marks();
842 }
843
// Presumably a ScanClosure variant whose barrier work is safe under
// parallel execution (see class name); construction just forwards.
ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}
847
// Closure that repeatedly applies the given scan closures to objects
// allocated since the last save-marks (see do_void).
EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}
855
856 void EvacuateFollowersClosureGeneral::do_void() {
857 do {
858 // Beware: this call will lead to closure applications via virtual
859 // calls.
860 _gch->oop_since_save_marks_iterate(Generation::Young,
861 _scan_cur_or_nonheap,
862 _scan_older);
863 } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
864 }
865
866
867 // A Generation that does parallel young-gen collection.
868
869 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
870 assert(_promo_failure_scan_stack.is_empty(), "post condition");
871 _promo_failure_scan_stack.clear(true); // Clear cached segments.
872
873 remove_forwarding_pointers();
874 if (PrintGCDetails) {
875 gclog_or_tty->print(" (promotion failed)");
876 }
877 // All the spaces are in play for mark-sweep.
878 swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
879 from()->set_next_compaction_space(to());
880 gch->set_incremental_collection_failed();
881 // Inform the next generation that a promotion failure occurred.
882 _old_gen->promotion_failure_occurred();
883
964 } else {
965 StrongRootsScope srs;
966 tsk.work(0);
967 }
968 thread_state_set.reset(0 /* Bad value in debug if not reset */,
969 promotion_failed());
970
971 // Trace and reset failed promotion info.
972 if (promotion_failed()) {
973 thread_state_set.trace_promotion_failed(gc_tracer());
974 }
975
976 // Process (weak) reference objects found during scavenge.
977 ReferenceProcessor* rp = ref_processor();
978 IsAliveClosure is_alive(this);
979 ScanWeakRefClosure scan_weak_ref(this);
980 KeepAliveClosure keep_alive(&scan_weak_ref);
981 ScanClosure scan_without_gc_barrier(this, false);
982 ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
983 set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
984 EvacuateFollowersClosureGeneral evacuate_followers(gch,
985 &scan_without_gc_barrier, &scan_with_gc_barrier);
986 rp->setup_policy(clear_all_soft_refs);
987 // Can the mt_degree be set later (at run_task() time would be best)?
988 rp->set_active_mt_degree(active_workers);
989 ReferenceProcessorStats stats;
990 if (rp->processing_is_mt()) {
991 ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
992 stats = rp->process_discovered_references(&is_alive, &keep_alive,
993 &evacuate_followers, &task_executor,
994 _gc_timer, _gc_tracer.gc_id());
995 } else {
996 thread_state_set.flush();
997 gch->set_par_threads(0); // 0 ==> non-parallel.
998 gch->save_marks();
999 stats = rp->process_discovered_references(&is_alive, &keep_alive,
1000 &evacuate_followers, NULL,
1001 _gc_timer, _gc_tracer.gc_id());
1002 }
1003 _gc_tracer.report_gc_reference_stats(stats);
1004 if (!promotion_failed()) {
1005 // Swap the survivor spaces.
1006 eden()->clear(SpaceDecorator::Mangle);
1007 from()->clear(SpaceDecorator::Mangle);
1008 if (ZapUnusedHeapArea) {
1009 // This is now done here because of the piece-meal mangling which
1010 // can check for valid mangling at intermediate points in the
1011 // collection(s). When a minor collection fails to collect
1038 if (PrintGC && !PrintGCDetails) {
1039 gch->print_heap_change(gch_prev_used);
1040 }
1041
1042 TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
1043 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());
1044
1045 if (UseAdaptiveSizePolicy) {
1046 size_policy->minor_collection_end(gch->gc_cause());
1047 size_policy->avg_survived()->sample(from()->used());
1048 }
1049
1050 // We need to use a monotonically non-decreasing time in ms
1051 // or we will see time-warp warnings and os::javaTimeMillis()
1052 // does not guarantee monotonicity.
1053 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1054 update_time_of_last_gc(now);
1055
1056 rp->set_enqueuing_is_done(true);
1057 if (rp->processing_is_mt()) {
1058 ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
1059 rp->enqueue_discovered_references(&task_executor);
1060 } else {
1061 rp->enqueue_discovered_references(NULL);
1062 }
1063 rp->verify_no_references_recorded();
1064
1065 gch->trace_heap_after_gc(gc_tracer());
1066 _gc_tracer.report_tenuring_threshold(tenuring_threshold());
1067
1068 _gc_timer->register_gc_end();
1069
1070 _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1071 }
1072
1073 static int sum;
1074 void ParNewGeneration::waste_some_time() {
1075 for (int i = 0; i < 100; i++) {
1076 sum += i;
1077 }
1078 }
|