609 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
610 gch->rem_set()->klass_rem_set());
611 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
612 &fsc_with_no_gc_barrier,
613 false);
614
615 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
616 FastEvacuateFollowersClosure evacuate_followers(gch,
617 &fsc_with_no_gc_barrier,
618 &fsc_with_gc_barrier);
619
620 assert(gch->no_allocs_since_save_marks(),
621 "save marks have not been newly set.");
622
623 {
624 // DefNew needs to run with n_threads == 0, to make sure the serial
625 // version of the card table scanning code is used.
626 // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
627 StrongRootsScope srs(0);
628
629 gch->gen_process_roots(&srs,
630 GenCollectedHeap::YoungGen,
631 true, // Process younger gens, if any,
632 // as strong roots.
633 GenCollectedHeap::SO_ScavengeCodeCache,
634 GenCollectedHeap::StrongAndWeakRoots,
635 &fsc_with_no_gc_barrier,
636 &fsc_with_gc_barrier,
637 &cld_scan_closure);
638 }
639
640 // "evacuate followers".
641 evacuate_followers.do_void();
642
643 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
644 ReferenceProcessor* rp = ref_processor();
645 rp->setup_policy(clear_all_soft_refs);
646 const ReferenceProcessorStats& stats =
647 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
648 NULL, _gc_timer);
649 gc_tracer.report_gc_reference_stats(stats);
650 gc_tracer.report_tenuring_threshold(tenuring_threshold());
651
652 if (!_promotion_failed) {
653 // Swap the survivor spaces.
654 eden()->clear(SpaceDecorator::Mangle);
655 from()->clear(SpaceDecorator::Mangle);
656 if (ZapUnusedHeapArea) {
657 // This is now done here because of the piece-meal mangling which
833 }
834
835
836 void DefNewGeneration::reset_saved_marks() {
837 eden()->reset_saved_mark();
838 to()->reset_saved_mark();
839 from()->reset_saved_mark();
840 }
841
842
// Return true iff no space in this generation has allocated past its
// saved mark (i.e. "top" is still where it was when save_marks() last
// ran).  Allocation in eden or from-space since then violates the
// caller's contract, hence the debug-only asserts; the value actually
// reported is whether to-space is still clean.
843 bool DefNewGeneration::no_allocs_since_save_marks() {
844 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
845 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
846 return to()->saved_mark_at_top();
847 }
848
// Expands to one definition of
// DefNewGeneration::oop_since_save_marks_iterate<nv_suffix> for each
// (closure-type, suffix) pair that ALL_SINCE_SAVE_MARKS_CLOSURES
// supplies below.  Each generated method installs this generation on
// the closure, iterates the oops allocated since the last save_marks()
// in eden, to- and from-space, clears the closure's generation again,
// and finally re-saves the marks so the next call only visits newer
// allocations.
// NOTE: no '//' comments inside the macro body -- a line comment would
// swallow the continuation backslash and truncate the macro.
849 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
850 \
851 void DefNewGeneration:: \
852 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
853 cl->set_generation(this); \
854 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
855 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
856 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
857 cl->reset_generation(); \
858 save_marks(); \
859 }
860
// Instantiate the method family for every known closure type.
861 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
862
863 #undef DefNew_SINCE_SAVE_MARKS_DEFN
864
865 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
866 size_t max_alloc_words) {
867 if (requestor == this || _promotion_failed) {
868 return;
869 }
870 assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
871
872 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
873 if (to_space->top() > to_space->bottom()) {
874 trace("to_space not empty when contribute_scratch called");
875 }
876 */
877
|
609 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
610 gch->rem_set()->klass_rem_set());
611 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
612 &fsc_with_no_gc_barrier,
613 false);
614
615 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
616 FastEvacuateFollowersClosure evacuate_followers(gch,
617 &fsc_with_no_gc_barrier,
618 &fsc_with_gc_barrier);
619
620 assert(gch->no_allocs_since_save_marks(),
621 "save marks have not been newly set.");
622
623 {
624 // DefNew needs to run with n_threads == 0, to make sure the serial
625 // version of the card table scanning code is used.
626 // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
627 StrongRootsScope srs(0);
628
629 gch->young_process_roots(&srs,
630 &fsc_with_no_gc_barrier,
631 &fsc_with_gc_barrier,
632 &cld_scan_closure);
633 }
634
635 // "evacuate followers".
636 evacuate_followers.do_void();
637
638 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
639 ReferenceProcessor* rp = ref_processor();
640 rp->setup_policy(clear_all_soft_refs);
641 const ReferenceProcessorStats& stats =
642 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
643 NULL, _gc_timer);
644 gc_tracer.report_gc_reference_stats(stats);
645 gc_tracer.report_tenuring_threshold(tenuring_threshold());
646
647 if (!_promotion_failed) {
648 // Swap the survivor spaces.
649 eden()->clear(SpaceDecorator::Mangle);
650 from()->clear(SpaceDecorator::Mangle);
651 if (ZapUnusedHeapArea) {
652 // This is now done here because of the piece-meal mangling which
828 }
829
830
831 void DefNewGeneration::reset_saved_marks() {
832 eden()->reset_saved_mark();
833 to()->reset_saved_mark();
834 from()->reset_saved_mark();
835 }
836
837
// Return true iff no space in this generation has allocated past its
// saved mark since save_marks() last ran.  Eden and from-space are
// required to be clean (debug-only asserts); the returned value
// reflects whether to-space is also clean.
838 bool DefNewGeneration::no_allocs_since_save_marks() {
839 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
840 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
841 return to()->saved_mark_at_top();
842 }
843
// Expands to one definition of
// DefNewGeneration::oop_since_save_marks_iterate<nv_suffix> for each
// (closure-type, suffix) pair supplied by ALL_SINCE_SAVE_MARKS_CLOSURES
// below.  Each generated method checks (debug-only) that the closure is
// already bound to this generation, iterates the oops allocated since
// the last save_marks() in eden, to- and from-space, and re-saves the
// marks so the next call only visits newer allocations.  Unlike the
// older set_generation()/reset_generation() protocol, this variant only
// asserts the binding -- it never mutates the closure.
// NOTE: no '//' comments inside the macro body -- a line comment would
// swallow the continuation backslash and truncate the macro.
844 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
845 \
846 void DefNewGeneration:: \
847 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
848 cl->assert_generation(this); \
849 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
850 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
851 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
852 save_marks(); \
853 }
854
// Instantiate the method family for every known closure type.
855 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
856
857 #undef DefNew_SINCE_SAVE_MARKS_DEFN
858
859 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
860 size_t max_alloc_words) {
861 if (requestor == this || _promotion_failed) {
862 return;
863 }
864 assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
865
866 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
867 if (to_space->top() > to_space->bottom()) {
868 trace("to_space not empty when contribute_scratch called");
869 }
870 */
871
|