#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// DefNewGeneration functions.

// Methods of protected closure types.

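// IsAliveClosure implements the liveness test used while scavenging the
// young generation: an address at or beyond the end of this generation's
// reserved region lies outside the young gen and is treated as alive here,
// while an object inside the young gen is alive iff this scavenge has
// already forwarded (copied) it.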
DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { }

bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

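// Both evacuate-followers closures below iterate to a fixed point: copying a
// followed object can itself allocate in to-space or promote into the old
// generation, exposing new objects beyond the saved marks, so the scan is
// repeated until no allocations have occurred since the marks were last
// saved.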
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(Generation::Young));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _gen(gen), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(Generation::Young));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

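// A Klass records whether any of its metadata reference fields have been
// modified, much as dirty cards record modified heap words; do_klass below
// uses that flag to skip klasses that cannot point into the young gen.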
void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

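  // A "soft-ended" eden allows allocation to be stopped before eden is
  // physically full (used by a concurrent collector operating
  // incrementally), so the collector policy chooses which eden
  // implementation to instantiate.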
  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.

// ... (elided) ...

  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the next older generation
  // (which is required to exist), so from-space will normally be empty.
  // We check both spaces because, if the scavenge failed, they swap roles.
  // If either space is non-empty we bail out, since resizing would require
  // relocating the live objects.
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
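  // Illustrative arithmetic (example values, not defaults): with
  // old_size = 96M, NewRatio = 3, 8 non-daemon threads and
  // NewSizeThreadIncrease = 16K, this computes 96M/3 + 8*16K = 32M + 128K,
  // rounded up to a multiple of GenGrain, and the result is then clamped to
  // [min_new_size, max_new_size] below.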

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

// ... (elided) ...

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(Generation::Young),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

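  // Two variants of the scan closure are needed because roots are scanned
  // with different barrier requirements: references found in the young gen
  // itself need no remembered-set update when their target moves, while
  // references scanned in older generations must be recorded if they still
  // point into the young gen, hence the gc_barrier = true copy below.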
  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(Generation::Young),
         "save marks have not been newly set.");

  gch->gen_process_roots(Generation::Young,
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         true,  // activate StrongRootsScope
                         SharedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &fsc_with_no_gc_barrier,
                         &fsc_with_gc_barrier,
                         &cld_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

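  // Weak reference processing runs only after all strongly reachable
  // objects have been copied: is_alive reports whether a referent survived,
  // keep_alive copies a referent that must be retained, and
  // evacuate_followers transitively copies anything newly kept alive.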
  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

// ... (elided) ...

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN
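
// For each (closure type, suffix) pair supplied by
// ALL_SINCE_SAVE_MARKS_CLOSURES, the macro above emits one specialized
// iterator. A sketch of a single expansion (the closure type and suffix
// shown are illustrative):
//
//   void DefNewGeneration::oop_since_save_marks_iterate_nv(FastScanClosure* cl) {
//     cl->set_generation(this);
//     eden()->oop_since_save_marks_iterate_nv(cl);
//     to()->oop_since_save_marks_iterate_nv(cl);
//     from()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }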

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor == GenCollectedHeap::heap()->old_gen(), "We should not call our own generation");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}
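
// A ScratchBlock is the unused tail of to-space reinterpreted in place: the
// first words of the free area hold the block's size and a link to the next
// block, so donated regions can be threaded onto the requestor's list
// without any separate allocation.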

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of