21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/serial/defNewGeneration.inline.hpp"
27 #include "gc/shared/collectorCounters.hpp"
28 #include "gc/shared/gcHeapSummary.hpp"
29 #include "gc/shared/gcLocker.inline.hpp"
30 #include "gc/shared/gcPolicyCounters.hpp"
31 #include "gc/shared/gcTimer.hpp"
32 #include "gc/shared/gcTrace.hpp"
33 #include "gc/shared/gcTraceTime.hpp"
34 #include "gc/shared/genCollectedHeap.hpp"
35 #include "gc/shared/genOopClosures.inline.hpp"
36 #include "gc/shared/genRemSet.hpp"
37 #include "gc/shared/generationSpec.hpp"
38 #include "gc/shared/referencePolicy.hpp"
39 #include "gc/shared/space.inline.hpp"
40 #include "gc/shared/spaceDecorator.hpp"
41 #include "memory/iterator.hpp"
42 #include "oops/instanceRefKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/atomic.inline.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/prefetch.inline.hpp"
47 #include "runtime/thread.inline.hpp"
48 #include "utilities/copy.hpp"
49 #include "utilities/globalDefinitions.hpp"
50 #include "utilities/stack.inline.hpp"
51 #if INCLUDE_ALL_GCS
52 #include "gc/cms/parOopClosures.hpp"
53 #endif
54
55 //
56 // DefNewGeneration functions.
57
58 // Methods of protected closure types.
59
60 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
437 (HeapWord*)_virtual_space.high());
438 gch->barrier_set()->resize_covered_region(cmr);
439 if (Verbose && PrintGC) {
440 size_t new_size_after = _virtual_space.committed_size();
441 size_t eden_size_after = eden()->capacity();
442 size_t survivor_size_after = from()->capacity();
443 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
444 SIZE_FORMAT "K [eden="
445 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
446 new_size_before/K, new_size_after/K,
447 eden_size_after/K, survivor_size_after/K);
448 if (WizardMode) {
449 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
450 thread_increase_size/K, threads_count);
451 }
452 gclog_or_tty->cr();
453 }
454 }
455 }
456
// Iterating over references held by younger generations is not implemented
// here; any call fails on the assert below.
// NOTE(review): presumably this is safe because DefNew is the youngest
// generation, so no younger generation can hold references into it -- confirm.
void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}
460
461
462 size_t DefNewGeneration::capacity() const {
463 return eden()->capacity()
464 + from()->capacity(); // to() is only used during scavenge
465 }
466
467
468 size_t DefNewGeneration::used() const {
469 return eden()->used()
470 + from()->used(); // to() is only used during scavenge
471 }
472
473
474 size_t DefNewGeneration::free() const {
475 return eden()->free()
476 + from()->free(); // to() is only used during scavenge
477 }
608 // Not very pretty.
609 CollectorPolicy* cp = gch->collector_policy();
610
611 FastScanClosure fsc_with_no_gc_barrier(this, false);
612 FastScanClosure fsc_with_gc_barrier(this, true);
613
614 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
615 gch->rem_set()->klass_rem_set());
616 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
617 &fsc_with_no_gc_barrier,
618 false);
619
620 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
621 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
622 &fsc_with_no_gc_barrier,
623 &fsc_with_gc_barrier);
624
625 assert(gch->no_allocs_since_save_marks(0),
626 "save marks have not been newly set.");
627
628 gch->gen_process_roots(_level,
629 true, // Process younger gens, if any,
630 // as strong roots.
631 true, // activate StrongRootsScope
632 GenCollectedHeap::SO_ScavengeCodeCache,
633 GenCollectedHeap::StrongAndWeakRoots,
634 &fsc_with_no_gc_barrier,
635 &fsc_with_gc_barrier,
636 &cld_scan_closure);
637
638 // "evacuate followers".
639 evacuate_followers.do_void();
640
641 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
642 ReferenceProcessor* rp = ref_processor();
643 rp->setup_policy(clear_all_soft_refs);
644 const ReferenceProcessorStats& stats =
645 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
646 NULL, _gc_timer, gc_tracer.gc_id());
647 gc_tracer.report_gc_reference_stats(stats);
648
649 if (!_promotion_failed) {
650 // Swap the survivor spaces.
651 eden()->clear(SpaceDecorator::Mangle);
652 from()->clear(SpaceDecorator::Mangle);
653 if (ZapUnusedHeapArea) {
654 // This is now done here because of the piece-meal mangling which
655 // can check for valid mangling at intermediate points in the
656 // collection(s). When a minor collection fails to collect
|
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/serial/defNewGeneration.inline.hpp"
27 #include "gc/shared/collectorCounters.hpp"
28 #include "gc/shared/gcHeapSummary.hpp"
29 #include "gc/shared/gcLocker.inline.hpp"
30 #include "gc/shared/gcPolicyCounters.hpp"
31 #include "gc/shared/gcTimer.hpp"
32 #include "gc/shared/gcTrace.hpp"
33 #include "gc/shared/gcTraceTime.hpp"
34 #include "gc/shared/genCollectedHeap.hpp"
35 #include "gc/shared/genOopClosures.inline.hpp"
36 #include "gc/shared/genRemSet.hpp"
37 #include "gc/shared/generationSpec.hpp"
38 #include "gc/shared/referencePolicy.hpp"
39 #include "gc/shared/space.inline.hpp"
40 #include "gc/shared/spaceDecorator.hpp"
41 #include "gc/shared/strongRootsScope.hpp"
42 #include "memory/iterator.hpp"
43 #include "oops/instanceRefKlass.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "runtime/atomic.inline.hpp"
46 #include "runtime/java.hpp"
47 #include "runtime/prefetch.inline.hpp"
48 #include "runtime/thread.inline.hpp"
49 #include "utilities/copy.hpp"
50 #include "utilities/globalDefinitions.hpp"
51 #include "utilities/stack.inline.hpp"
52 #if INCLUDE_ALL_GCS
53 #include "gc/cms/parOopClosures.hpp"
54 #endif
55
56 //
57 // DefNewGeneration functions.
58
59 // Methods of protected closure types.
60
61 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
438 (HeapWord*)_virtual_space.high());
439 gch->barrier_set()->resize_covered_region(cmr);
440 if (Verbose && PrintGC) {
441 size_t new_size_after = _virtual_space.committed_size();
442 size_t eden_size_after = eden()->capacity();
443 size_t survivor_size_after = from()->capacity();
444 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
445 SIZE_FORMAT "K [eden="
446 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
447 new_size_before/K, new_size_after/K,
448 eden_size_after/K, survivor_size_after/K);
449 if (WizardMode) {
450 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
451 thread_increase_size/K, threads_count);
452 }
453 gclog_or_tty->cr();
454 }
455 }
456 }
457
// Iterating over references held by younger generations is not implemented
// here; any call fails on the assert below. Both parameters (including the
// n_threads count added for parallel card scanning) are ignored.
// NOTE(review): presumably this is safe because DefNew is the youngest
// generation, so no younger generation can hold references into it -- confirm.
void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}
461
462
463 size_t DefNewGeneration::capacity() const {
464 return eden()->capacity()
465 + from()->capacity(); // to() is only used during scavenge
466 }
467
468
469 size_t DefNewGeneration::used() const {
470 return eden()->used()
471 + from()->used(); // to() is only used during scavenge
472 }
473
474
475 size_t DefNewGeneration::free() const {
476 return eden()->free()
477 + from()->free(); // to() is only used during scavenge
478 }
609 // Not very pretty.
610 CollectorPolicy* cp = gch->collector_policy();
611
612 FastScanClosure fsc_with_no_gc_barrier(this, false);
613 FastScanClosure fsc_with_gc_barrier(this, true);
614
615 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
616 gch->rem_set()->klass_rem_set());
617 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
618 &fsc_with_no_gc_barrier,
619 false);
620
621 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
622 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
623 &fsc_with_no_gc_barrier,
624 &fsc_with_gc_barrier);
625
626 assert(gch->no_allocs_since_save_marks(0),
627 "save marks have not been newly set.");
628
629 {
630 // DefNew needs to run with n_threads == 0, to make sure the serial
631 // version of the card table scanning code is used.
632 // See: CardTableModRefBS::non_clean_card_iterate_possibly_parallel.
633 StrongRootsScope srs(0);
634
635 gch->gen_process_roots(&srs,
636 _level,
637 true, // Process younger gens, if any,
638 // as strong roots.
639 GenCollectedHeap::SO_ScavengeCodeCache,
640 GenCollectedHeap::StrongAndWeakRoots,
641 &fsc_with_no_gc_barrier,
642 &fsc_with_gc_barrier,
643 &cld_scan_closure);
644 }
645
646 // "evacuate followers".
647 evacuate_followers.do_void();
648
649 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
650 ReferenceProcessor* rp = ref_processor();
651 rp->setup_policy(clear_all_soft_refs);
652 const ReferenceProcessorStats& stats =
653 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
654 NULL, _gc_timer, gc_tracer.gc_id());
655 gc_tracer.report_gc_reference_stats(stats);
656
657 if (!_promotion_failed) {
658 // Swap the survivor spaces.
659 eden()->clear(SpaceDecorator::Mangle);
660 from()->clear(SpaceDecorator::Mangle);
661 if (ZapUnusedHeapArea) {
662 // This is now done here because of the piece-meal mangling which
663 // can check for valid mangling at intermediate points in the
664 // collection(s). When a minor collection fails to collect
|