45 #include "gc/shared/strongRootsScope.hpp"
46 #include "gc/shared/taskqueue.inline.hpp"
47 #include "gc/shared/workgroup.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "oops/objArrayOop.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "runtime/atomic.inline.hpp"
52 #include "runtime/handles.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/java.hpp"
55 #include "runtime/thread.inline.hpp"
56 #include "utilities/copy.hpp"
57 #include "utilities/globalDefinitions.hpp"
58 #include "utilities/stack.inline.hpp"
59
60 #ifdef _MSC_VER
61 #pragma warning( push )
62 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
63 #endif
// Per-GC-worker scanning state for the parallel young-gen (ParNew) collector.
// Bundles the worker's task queue, optional overflow stack, to-space PLAB,
// age table and the full set of scan closures used during the evacuation pass.
//
// to_space_        - the to-space this worker evacuates survivors into
// gen_             - the young (ParNew) generation being collected
// old_gen_         - the next (older) generation, the promotion target
// thread_num_      - this worker's index; selects its queue/overflow stack
// work_queue_set_  - shared set of obj-to-scan queues, one per worker
// overflow_stacks_ - base of the per-thread overflow-stack array, or NULL
//                    when local overflow handling is disabled
// desired_plab_sz_ - desired size for the to-space allocation buffer
// term_            - terminator shared by all workers of this collection
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  // Select this thread's slot in the shared overflow-stack array, if any.
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  // The old gen keeps per-thread survivor-chunk recorders; fetch this
  // thread's slot.
  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  // The two old-gen closures were constructed against the young gen above;
  // repoint them at the actual old generation.
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
100 #ifdef _MSC_VER
101 #pragma warning( pop )
102 #endif
103
464
465 // Inform old gen that we're done.
466 _old_gen.par_promote_alloc_done(i);
467 _old_gen.par_oop_since_save_marks_iterate_done(i);
468 }
469
470 if (UseConcMarkSweepGC) {
471 // We need to call this even when ResizeOldPLAB is disabled
472 // so as to avoid breaking some asserts. While we may be able
473 // to avoid this by reorganizing the code a bit, I am loathe
474 // to do that unless we find cases where ergo leads to bad
475 // performance.
476 CFLS_LAB::compute_desired_plab_size();
477 }
478 }
479
// Base scan closure for the young generation. Caches the generation's
// reserved-space end as _boundary so oop scanning can cheaply test whether
// a reference points into the young gen.
ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  // The boundary test below only works for the youngest generation.
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}
487
// do_oop dispatchers for the four ParScanClosure variants (oop and narrowOop
// overloads). All forward to ParScanClosure::do_oop_work with two boolean
// flags; judging by the class names the first selects gc-barrier updates and
// the second root-scan handling -- confirm against do_oop_work's signature.
void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
499
// Weak-reference variant of the young-gen scan closure; delegates generation
// bookkeeping to ScanWeakRefClosure and records the owning worker's state.
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}
504
549
550 // if successful, goto Start.
551 continue;
552
553 // try global overflow list.
554 } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
555 continue;
556 }
557
558 // Otherwise, offer termination.
559 par_scan_state()->start_term_time();
560 if (terminator()->offer_termination()) break;
561 par_scan_state()->end_term_time();
562 }
563 assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
564 "Broken overflow list?");
565 // Finish the last termination pause.
566 par_scan_state()->end_term_time();
567 }
568
// Gang task for a parallel young-gen collection. The constructor only stores
// the arguments; the per-worker work happens in work().
ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
  AbstractGangTask("ParNewGeneration collection"),
  _gen(gen), _old_gen(old_gen),
  _young_old_boundary(young_old_boundary),
  _state_set(state_set),
  _strong_roots_scope(strong_roots_scope)
{}
578
// Per-worker body of the young-gen collection task: process strong roots into
// this worker's closures, then drain the work queue ("evacuate followers").
void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  // Closures for scanning klasses and class-loader data; both ultimately feed
  // oops into this worker's to-space root closure.
  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  // Time the strong-root phase separately from the evacuation phase.
  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_strong_roots_scope,
                         _gen->level(),
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}
613
614 #ifdef _MSC_VER
615 #pragma warning( push )
616 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
617 #endif
618 ParNewGeneration::
619 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
620 : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
621 _overflow_list(NULL),
622 _is_alive_closure(this),
623 _plab_stats(YoungPLABSize, PLABWeight)
624 {
625 NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
626 NOT_PRODUCT(_num_par_pushes = 0;)
627 _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
628 guarantee(_task_queues != NULL, "task_queues allocation failure.");
629
630 for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
631 ObjToScanQueue *q = new ObjToScanQueue();
632 guarantee(q != NULL, "work_queue Allocation failure.");
633 _task_queues->register_queue(i1, q);
634 }
635
636 for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
637 _task_queues->queue(i2)->initialize();
638
639 _overflow_stacks = NULL;
640 if (ParGCUseLocalOverflow) {
735 }
736 }
737 }
738 }
739
// do_oop dispatchers (oop and narrowOop overloads); both forward to this
// class's do_oop_work.
void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
742
// Gang task adapter that runs a reference-processing ProcessTask on each GC
// worker thread, handing each worker the closures from its own
// ParScanThreadState (see work() below).
class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
private:
  ParNewGeneration& _gen;            // young generation being collected
  ProcessTask& _task;                // the reference-processing task to run
  Generation& _old_gen;              // next (older) generation
  HeapWord* _young_old_boundary;     // handed to each worker's scan state
  ParScanThreadStateSet& _state_set; // per-worker scan states
};
761
// Constructor: store all collaborators; the real work is in work().
ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}
775
// Per-worker body: bind this worker's scan state, then hand its is-alive,
// keep-alive and evacuate-followers closures to the reference task.
void ParNewRefProcTaskProxy::work(uint worker_id)
{
  // Runs on a gang worker thread; needs its own resource/handle marks.
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}
786
// Gang task adapter for parallel reference enqueueing: each worker simply
// invokes the wrapped EnqueueTask with its worker id.
class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};
802
803
// Run a reference-processing task on all parallel GC workers: reset the
// per-worker scan states for the active worker count, wrap the task in a
// proxy, run it on the gang, then reset the states again (with a bad worker
// count) so stale use trips debug checks.
void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  // The young/old boundary is the end of the young gen's reserved space.
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}
816
817 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
818 {
819 GenCollectedHeap* gch = GenCollectedHeap::heap();
820 FlexibleWorkGang* workers = gch->workers();
821 assert(workers != NULL, "Need parallel worker threads.");
822 ParNewRefEnqueueTaskProxy enq_task(task);
823 workers->run_task(&enq_task);
824 }
825
// Switch reference processing to single-threaded mode: merge the per-worker
// scan states back together, then save marks -- presumably so subsequent
// since-save-marks iteration starts from a clean point (confirm against
// callers).
void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->save_marks();
}
832
// ScanClosure variant used with a parallel barrier; simply forwards the
// gc_barrier flag to the ScanClosure base.
ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}
836
// Evacuate-followers closure parameterized by a generation level and a pair
// of closures: one for the current gen / non-heap, one for older gens.
EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}
844
// Repeatedly apply the scan closures to objects allocated since the last
// save-marks, until a fixed point (no new allocations) is reached.
void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}
854
855
856 // A Generation that does parallel young-gen collection.
857
858 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
859 assert(_promo_failure_scan_stack.is_empty(), "post condition");
860 _promo_failure_scan_stack.clear(true); // Clear cached segments.
861
862 remove_forwarding_pointers();
863 if (PrintGCDetails) {
864 gclog_or_tty->print(" (promotion failed)");
865 }
866 // All the spaces are in play for mark-sweep.
867 swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
868 from()->set_next_compaction_space(to());
869 gch->set_incremental_collection_failed();
870 // Inform the next generation that a promotion failure occurred.
871 _old_gen->promotion_failure_occurred();
872
955 tsk.work(0);
956 }
957 }
958
959 thread_state_set.reset(0 /* Bad value in debug if not reset */,
960 promotion_failed());
961
962 // Trace and reset failed promotion info.
963 if (promotion_failed()) {
964 thread_state_set.trace_promotion_failed(gc_tracer());
965 }
966
967 // Process (weak) reference objects found during scavenge.
968 ReferenceProcessor* rp = ref_processor();
969 IsAliveClosure is_alive(this);
970 ScanWeakRefClosure scan_weak_ref(this);
971 KeepAliveClosure keep_alive(&scan_weak_ref);
972 ScanClosure scan_without_gc_barrier(this, false);
973 ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
974 set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
975 EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
976 &scan_without_gc_barrier, &scan_with_gc_barrier);
977 rp->setup_policy(clear_all_soft_refs);
978 // Can the mt_degree be set later (at run_task() time would be best)?
979 rp->set_active_mt_degree(active_workers);
980 ReferenceProcessorStats stats;
981 if (rp->processing_is_mt()) {
982 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
983 stats = rp->process_discovered_references(&is_alive, &keep_alive,
984 &evacuate_followers, &task_executor,
985 _gc_timer, _gc_tracer.gc_id());
986 } else {
987 thread_state_set.flush();
988 gch->save_marks();
989 stats = rp->process_discovered_references(&is_alive, &keep_alive,
990 &evacuate_followers, NULL,
991 _gc_timer, _gc_tracer.gc_id());
992 }
993 _gc_tracer.report_gc_reference_stats(stats);
994 if (!promotion_failed()) {
995 // Swap the survivor spaces.
996 eden()->clear(SpaceDecorator::Mangle);
997 from()->clear(SpaceDecorator::Mangle);
998 if (ZapUnusedHeapArea) {
999 // This is now done here because of the piece-meal mangling which
1000 // can check for valid mangling at intermediate points in the
1001 // collection(s). When a minor collection fails to collect
1002 // sufficient space resizing of the young generation can occur
1028 if (PrintGC && !PrintGCDetails) {
1029 gch->print_heap_change(gch_prev_used);
1030 }
1031
1032 TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
1033 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());
1034
1035 if (UseAdaptiveSizePolicy) {
1036 size_policy->minor_collection_end(gch->gc_cause());
1037 size_policy->avg_survived()->sample(from()->used());
1038 }
1039
1040 // We need to use a monotonically non-decreasing time in ms
1041 // or we will see time-warp warnings and os::javaTimeMillis()
1042 // does not guarantee monotonicity.
1043 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1044 update_time_of_last_gc(now);
1045
1046 rp->set_enqueuing_is_done(true);
1047 if (rp->processing_is_mt()) {
1048 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1049 rp->enqueue_discovered_references(&task_executor);
1050 } else {
1051 rp->enqueue_discovered_references(NULL);
1052 }
1053 rp->verify_no_references_recorded();
1054
1055 gch->trace_heap_after_gc(gc_tracer());
1056 _gc_tracer.report_tenuring_threshold(tenuring_threshold());
1057
1058 _gc_timer->register_gc_end();
1059
1060 _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1061 }
1062
// File-scope accumulator for waste_some_time(); gives the loop below an
// observable side effect -- presumably to discourage the compiler from
// eliding it, though it is not volatile, so elision is still possible.
static int sum;
// Burn a small, bounded amount of CPU (a crude delay/backoff helper).
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}
// ==== second copy: the same code after the young_gen / level-removal refactor ====
45 #include "gc/shared/strongRootsScope.hpp"
46 #include "gc/shared/taskqueue.inline.hpp"
47 #include "gc/shared/workgroup.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "oops/objArrayOop.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "runtime/atomic.inline.hpp"
52 #include "runtime/handles.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/java.hpp"
55 #include "runtime/thread.inline.hpp"
56 #include "utilities/copy.hpp"
57 #include "utilities/globalDefinitions.hpp"
58 #include "utilities/stack.inline.hpp"
59
60 #ifdef _MSC_VER
61 #pragma warning( push )
62 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
63 #endif
// Per-GC-worker scanning state for the parallel young-gen (ParNew) collector.
// Bundles the worker's task queue, optional overflow stack, to-space PLAB,
// age table and the scan closures used during the evacuation pass.
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* young_gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  // Select this thread's slot in the shared overflow-stack array, if any.
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
  _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
  _older_gen_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  // The old gen keeps per-thread survivor-chunk recorders; fetch this
  // thread's slot.
  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  // The two old-gen closures were constructed against the young gen above;
  // repoint them at the actual old generation.
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
100 #ifdef _MSC_VER
101 #pragma warning( pop )
102 #endif
103
464
465 // Inform old gen that we're done.
466 _old_gen.par_promote_alloc_done(i);
467 _old_gen.par_oop_since_save_marks_iterate_done(i);
468 }
469
470 if (UseConcMarkSweepGC) {
471 // We need to call this even when ResizeOldPLAB is disabled
472 // so as to avoid breaking some asserts. While we may be able
473 // to avoid this by reorganizing the code a bit, I am loathe
474 // to do that unless we find cases where ergo leads to bad
475 // performance.
476 CFLS_LAB::compute_desired_plab_size();
477 }
478 }
479
// Base scan closure for the young generation. Caches the generation's
// reserved-space end as _boundary so oop scanning can cheaply test whether
// a reference points into the young gen.
ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  _boundary = _g->reserved().end();
}
486
// do_oop dispatchers for the four ParScanClosure variants (oop and narrowOop
// overloads). All forward to ParScanClosure::do_oop_work with two boolean
// flags; judging by the class names the first selects gc-barrier updates and
// the second root-scan handling -- confirm against do_oop_work's signature.
void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
498
// Weak-reference variant of the young-gen scan closure; delegates generation
// bookkeeping to ScanWeakRefClosure and records the owning worker's state.
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}
503
548
549 // if successful, goto Start.
550 continue;
551
552 // try global overflow list.
553 } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
554 continue;
555 }
556
557 // Otherwise, offer termination.
558 par_scan_state()->start_term_time();
559 if (terminator()->offer_termination()) break;
560 par_scan_state()->end_term_time();
561 }
562 assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
563 "Broken overflow list?");
564 // Finish the last termination pause.
565 par_scan_state()->end_term_time();
566 }
567
// Gang task for a parallel young-gen collection. The constructor only stores
// the arguments; the per-worker work happens in work().
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
  AbstractGangTask("ParNewGeneration collection"),
  _young_gen(young_gen), _old_gen(old_gen),
  _young_old_boundary(young_old_boundary),
  _state_set(state_set),
  _strong_roots_scope(strong_roots_scope)
{}
577
// Per-worker body of the young-gen collection task: process strong roots into
// this worker's closures, then drain the work queue ("evacuate followers").
void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  // Closures for scanning klasses and class-loader data; both ultimately feed
  // oops into this worker's to-space root closure.
  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  // Time the strong-root phase separately from the evacuation phase.
  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_strong_roots_scope,
                         Generation::Young,
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}
612
613 #ifdef _MSC_VER
614 #pragma warning( push )
615 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
616 #endif
617 ParNewGeneration::
618 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
619 : DefNewGeneration(rs, initial_byte_size, "PCopy"),
620 _overflow_list(NULL),
621 _is_alive_closure(this),
622 _plab_stats(YoungPLABSize, PLABWeight)
623 {
624 NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
625 NOT_PRODUCT(_num_par_pushes = 0;)
626 _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
627 guarantee(_task_queues != NULL, "task_queues allocation failure.");
628
629 for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
630 ObjToScanQueue *q = new ObjToScanQueue();
631 guarantee(q != NULL, "work_queue Allocation failure.");
632 _task_queues->register_queue(i1, q);
633 }
634
635 for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
636 _task_queues->queue(i2)->initialize();
637
638 _overflow_stacks = NULL;
639 if (ParGCUseLocalOverflow) {
734 }
735 }
736 }
737 }
738
// do_oop dispatchers (oop and narrowOop overloads); both forward to this
// class's do_oop_work.
void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
741
742 class ParNewRefProcTaskProxy: public AbstractGangTask {
743 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
744 public:
745 ParNewRefProcTaskProxy(ProcessTask& task,
746 ParNewGeneration& gen,
747 Generation& old_gen,
748 HeapWord* young_old_boundary,
749 ParScanThreadStateSet& state_set);
750
751 private:
752 virtual void work(uint worker_id);
753 private:
754 ParNewGeneration& _young_gen;
755 ProcessTask& _task;
756 Generation& _old_gen;
757 HeapWord* _young_old_boundary;
758 ParScanThreadStateSet& _state_set;
759 };
760
// Constructor: store all collaborators; the real work is in work().
ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& young_gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _young_gen(young_gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}
774
// Per-worker body: bind this worker's scan state, then hand its is-alive,
// keep-alive and evacuate-followers closures to the reference task.
void ParNewRefProcTaskProxy::work(uint worker_id)
{
  // Runs on a gang worker thread; needs its own resource/handle marks.
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}
785
// Gang task adapter for parallel reference enqueueing: each worker simply
// invokes the wrapped EnqueueTask with its worker id.
class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};
801
802
// Run a reference-processing task on all parallel GC workers: reset the
// per-worker scan states for the active worker count, wrap the task in a
// proxy, run it on the gang, then reset the states again (with a bad worker
// count) so stale use trips debug checks.
void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
  // The young/old boundary is the end of the young gen's reserved space.
  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
                                 _young_gen.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _young_gen.promotion_failed());
}
815
816 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
817 {
818 GenCollectedHeap* gch = GenCollectedHeap::heap();
819 FlexibleWorkGang* workers = gch->workers();
820 assert(workers != NULL, "Need parallel worker threads.");
821 ParNewRefEnqueueTaskProxy enq_task(task);
822 workers->run_task(&enq_task);
823 }
824
// Switch reference processing to single-threaded mode: merge the per-worker
// scan states back together, then save marks -- presumably so subsequent
// since-save-marks iteration starts from a clean point (confirm against
// callers).
void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->save_marks();
}
831
// ScanClosure variant used with a parallel barrier; simply forwards the
// gc_barrier flag to the ScanClosure base.
ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}
835
// Evacuate-followers closure over the whole heap, parameterized by a pair of
// closures: one for the current gen / non-heap, one for older gens.
EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}
843
// Repeatedly apply the scan closures to objects allocated since the last
// save-marks, until a fixed point (no new allocations) is reached.
void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(Generation::Young,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
}
853
854
855 // A Generation that does parallel young-gen collection.
856
857 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
858 assert(_promo_failure_scan_stack.is_empty(), "post condition");
859 _promo_failure_scan_stack.clear(true); // Clear cached segments.
860
861 remove_forwarding_pointers();
862 if (PrintGCDetails) {
863 gclog_or_tty->print(" (promotion failed)");
864 }
865 // All the spaces are in play for mark-sweep.
866 swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
867 from()->set_next_compaction_space(to());
868 gch->set_incremental_collection_failed();
869 // Inform the next generation that a promotion failure occurred.
870 _old_gen->promotion_failure_occurred();
871
954 tsk.work(0);
955 }
956 }
957
958 thread_state_set.reset(0 /* Bad value in debug if not reset */,
959 promotion_failed());
960
961 // Trace and reset failed promotion info.
962 if (promotion_failed()) {
963 thread_state_set.trace_promotion_failed(gc_tracer());
964 }
965
966 // Process (weak) reference objects found during scavenge.
967 ReferenceProcessor* rp = ref_processor();
968 IsAliveClosure is_alive(this);
969 ScanWeakRefClosure scan_weak_ref(this);
970 KeepAliveClosure keep_alive(&scan_weak_ref);
971 ScanClosure scan_without_gc_barrier(this, false);
972 ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
973 set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
974 EvacuateFollowersClosureGeneral evacuate_followers(gch,
975 &scan_without_gc_barrier, &scan_with_gc_barrier);
976 rp->setup_policy(clear_all_soft_refs);
977 // Can the mt_degree be set later (at run_task() time would be best)?
978 rp->set_active_mt_degree(active_workers);
979 ReferenceProcessorStats stats;
980 if (rp->processing_is_mt()) {
981 ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
982 stats = rp->process_discovered_references(&is_alive, &keep_alive,
983 &evacuate_followers, &task_executor,
984 _gc_timer, _gc_tracer.gc_id());
985 } else {
986 thread_state_set.flush();
987 gch->save_marks();
988 stats = rp->process_discovered_references(&is_alive, &keep_alive,
989 &evacuate_followers, NULL,
990 _gc_timer, _gc_tracer.gc_id());
991 }
992 _gc_tracer.report_gc_reference_stats(stats);
993 if (!promotion_failed()) {
994 // Swap the survivor spaces.
995 eden()->clear(SpaceDecorator::Mangle);
996 from()->clear(SpaceDecorator::Mangle);
997 if (ZapUnusedHeapArea) {
998 // This is now done here because of the piece-meal mangling which
999 // can check for valid mangling at intermediate points in the
1000 // collection(s). When a minor collection fails to collect
1001 // sufficient space resizing of the young generation can occur
1027 if (PrintGC && !PrintGCDetails) {
1028 gch->print_heap_change(gch_prev_used);
1029 }
1030
1031 TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
1032 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());
1033
1034 if (UseAdaptiveSizePolicy) {
1035 size_policy->minor_collection_end(gch->gc_cause());
1036 size_policy->avg_survived()->sample(from()->used());
1037 }
1038
1039 // We need to use a monotonically non-decreasing time in ms
1040 // or we will see time-warp warnings and os::javaTimeMillis()
1041 // does not guarantee monotonicity.
1042 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1043 update_time_of_last_gc(now);
1044
1045 rp->set_enqueuing_is_done(true);
1046 if (rp->processing_is_mt()) {
1047 ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
1048 rp->enqueue_discovered_references(&task_executor);
1049 } else {
1050 rp->enqueue_discovered_references(NULL);
1051 }
1052 rp->verify_no_references_recorded();
1053
1054 gch->trace_heap_after_gc(gc_tracer());
1055 _gc_tracer.report_tenuring_threshold(tenuring_threshold());
1056
1057 _gc_timer->register_gc_end();
1058
1059 _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1060 }
1061
// File-scope accumulator for waste_some_time(); gives the loop below an
// observable side effect -- presumably to discourage the compiler from
// eliding it, though it is not volatile, so elision is still possible.
static int sum;
// Burn a small, bounded amount of CPU (a crude delay/backoff helper).
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}
|