40 #include "memory/referencePolicy.hpp"
41 #include "memory/space.inline.hpp"
42 #include "oops/instanceRefKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/atomic.inline.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/prefetch.inline.hpp"
47 #include "runtime/thread.inline.hpp"
48 #include "utilities/copy.hpp"
49 #include "utilities/globalDefinitions.hpp"
50 #include "utilities/stack.inline.hpp"
51 #if INCLUDE_ALL_GCS
52 #include "gc_implementation/parNew/parOopClosures.hpp"
53 #endif
54
55 //
56 // DefNewGeneration functions.
57
58 // Methods of protected closure types.
59
60 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
61 assert(g->level() == 0, "Optimized for youngest gen.");
62 }
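// Alive here means: at or above the end of this (young) generation's reserved
// space, i.e. not in the young gen at all, or already forwarded (copied)
// during the current scavenge.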
63 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
64 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
65 }
66
67 DefNewGeneration::KeepAliveClosure::
68 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
69 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
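  // Note: only card-table remembered sets are used with this collector, so
  // the downcast below is assumed to be safe.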
70 _rs = (CardTableRS*)rs;
71 }
72
73 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
74 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
75
76
77 DefNewGeneration::FastKeepAliveClosure::
78 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
79 DefNewGeneration::KeepAliveClosure(cl) {
80 _boundary = g->reserved().end();
81 }
82
83 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
84 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
85
86 DefNewGeneration::EvacuateFollowersClosure::
87 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
88 ScanClosure* cur, ScanClosure* older) :
89 _gch(gch), _level(level),
90 _scan_cur_or_nonheap(cur), _scan_older(older)
91 {}
92
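// Evacuate the transitive closure of reachable objects: each pass scans the
// objects copied since the last save_marks(), which may copy further objects,
// so we iterate until no allocations have occurred since the marks were saved.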
93 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
94 do {
95 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
96 _scan_older);
97 } while (!_gch->no_allocs_since_save_marks(_level));
98 }
99
100 DefNewGeneration::FastEvacuateFollowersClosure::
101 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
102 DefNewGeneration* gen,
103 FastScanClosure* cur, FastScanClosure* older) :
104 _gch(gch), _level(level), _gen(gen),
105 _scan_cur_or_nonheap(cur), _scan_older(older)
106 {}
107
108 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
109 do {
110 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
111 _scan_older);
112 } while (!_gch->no_allocs_since_save_marks(_level));
113 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
114 }
115
116 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
117 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
118 {
119 assert(_g->level() == 0, "Optimized for youngest generation");
120 _boundary = _g->reserved().end();
121 }
122
123 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
124 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
125
126 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
127 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
128 {
129 assert(_g->level() == 0, "Optimized for youngest generation");
130 _boundary = _g->reserved().end();
131 }
132
133 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
134 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
135
136 void KlassScanClosure::do_klass(Klass* klass) {
137 #ifndef PRODUCT
138 if (TraceScavenge) {
139 ResourceMark rm;
140 gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
141 p2i(klass),
142 klass->external_name(),
143 klass->has_modified_oops() ? "true" : "false");
144 }
145 #endif
146
147   // If the klass has not been dirtied we know that there are
148   // no references into the young gen and we can skip it.
149 if (klass->has_modified_oops()) {
150 if (_accumulate_modified_oops) {
151 klass->accumulate_modified_oops();
152 }
153
154 // Clear this state since we're going to scavenge all the metadata.
155 klass->clear_modified_oops();
156
157 // Tell the closure which Klass is being scanned so that it can be dirtied
158 // if oops are left pointing into the young gen.
159 _scavenge_closure->set_scanned_klass(klass);
160
161 klass->oops_do(_scavenge_closure);
162
163 _scavenge_closure->set_scanned_klass(NULL);
164 }
165 }
166
167 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
168 _g(g)
169 {
170 assert(_g->level() == 0, "Optimized for youngest generation");
171 _boundary = _g->reserved().end();
172 }
173
174 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
175 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
176
177 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
178 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
179
180 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
181 KlassRemSet* klass_rem_set)
182 : _scavenge_closure(scavenge_closure),
183 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
184
185
186 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
187 size_t initial_size,
188 int level,
189 const char* policy)
190 : Generation(rs, initial_size, level),
191 _promo_failure_drain_in_progress(false),
192 _should_allocate_from_space(false)
193 {
194 MemRegion cmr((HeapWord*)_virtual_space.low(),
195 (HeapWord*)_virtual_space.high());
196 GenCollectedHeap* gch = GenCollectedHeap::heap();
197
198 gch->barrier_set()->resize_covered_region(cmr);
199
200 _eden_space = new ContiguousSpace();
201 _from_space = new ContiguousSpace();
202 _to_space = new ContiguousSpace();
203
204 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
205 vm_exit_during_initialization("Could not allocate a new gen space");
206
207 // Compute the maximum eden and survivor space sizes. These sizes
208 // are computed assuming the entire reserved space is committed.
209 // These values are exported as performance counters.
210 uintx alignment = gch->collector_policy()->space_alignment();
354 }
355
356 // Do not attempt an expand to the reserve size here. The
357 // request should properly observe the maximum size of
358 // the generation, so an expand-to-reserve should be
359 // unnecessary. Also, a second expand-to-reserve call can
360 // potentially cause an undue expansion: for example, the
361 // first expand might fail for unknown reasons while the
362 // second succeeds and expands the heap to its maximum
363 // value.
364 if (GC_locker::is_active()) {
365 if (PrintGC && Verbose) {
366 gclog_or_tty->print_cr("Garbage collection disabled, "
367 "expanded heap instead");
368 }
369 }
370
371 return success;
372 }
373
374
375 void DefNewGeneration::compute_new_size() {
376 // This is called after a GC that includes the following generation
377 // (which is required to exist), so from-space will normally be empty.
378 // Note that we check both spaces, since if scavenge failed they revert roles.
379 // If not, we bail out (otherwise we would have to relocate the objects).
380 if (!from()->is_empty() || !to()->is_empty()) {
381 return;
382 }
383
384 int next_level = level() + 1;
385 GenCollectedHeap* gch = GenCollectedHeap::heap();
386 assert(next_level == 1, "DefNewGeneration must be a young gen");
387
388 Generation* old_gen = gch->old_gen();
389 size_t old_size = old_gen->capacity();
390 size_t new_size_before = _virtual_space.committed_size();
391 size_t min_new_size = spec()->init_size();
392 size_t max_new_size = reserved().byte_size();
393 assert(min_new_size <= new_size_before &&
394 new_size_before <= max_new_size,
395 "just checking");
396 // All space sizes must be multiples of Generation::GenGrain.
397 size_t alignment = Generation::GenGrain;
398
399 // Compute desired new generation size based on NewRatio and
400 // NewSizeThreadIncrease
401 size_t desired_new_size = old_size/NewRatio;
402 int threads_count = Threads::number_of_non_daemon_threads();
403 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
404 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
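  // Illustration with purely hypothetical values: an old generation capacity
  // of 512M, NewRatio=2, 10 non-daemon threads and NewSizeThreadIncrease=16K
  // yields 512M/2 + 10*16K = 256M + 160K, rounded up to a multiple of GenGrain.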
405
406 // Adjust new generation size
407 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
408 assert(desired_new_size <= max_new_size, "just checking");
409
585 }
586 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
587
588 init_assuming_no_promotion_failure();
589
590 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
591 // Capture heap used before collection (for printing).
592 size_t gch_prev_used = gch->used();
593
594 gch->trace_heap_before_gc(&gc_tracer);
595
596 // These can be shared for all code paths
597 IsAliveClosure is_alive(this);
598 ScanWeakRefClosure scan_weak_ref(this);
599
600 age_table()->clear();
601 to()->clear(SpaceDecorator::Mangle);
602
603 gch->rem_set()->prepare_for_younger_refs_iterate(false);
604
605 assert(gch->no_allocs_since_save_marks(0),
606 "save marks have not been newly set.");
607
608 // Not very pretty.
609 CollectorPolicy* cp = gch->collector_policy();
610
611 FastScanClosure fsc_with_no_gc_barrier(this, false);
612 FastScanClosure fsc_with_gc_barrier(this, true);
613
614 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
615 gch->rem_set()->klass_rem_set());
616 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
617 &fsc_with_no_gc_barrier,
618 false);
619
620 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
621 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
622 &fsc_with_no_gc_barrier,
623 &fsc_with_gc_barrier);
624
625 assert(gch->no_allocs_since_save_marks(0),
626 "save marks have not been newly set.");
627
628 gch->gen_process_roots(_level,
629 true, // Process younger gens, if any,
630 // as strong roots.
631 true, // activate StrongRootsScope
632 GenCollectedHeap::SO_ScavengeCodeCache,
633 GenCollectedHeap::StrongAndWeakRoots,
634 &fsc_with_no_gc_barrier,
635 &fsc_with_gc_barrier,
636 &cld_scan_closure);
637
638 // "evacuate followers".
639 evacuate_followers.do_void();
640
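  // Process references discovered during the scavenge: is_alive tests whether
  // a referent survived, keep_alive copies referents that must be retained,
  // and evacuate_followers drains any objects copied as a consequence.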
641 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
642 ReferenceProcessor* rp = ref_processor();
643 rp->setup_policy(clear_all_soft_refs);
644 const ReferenceProcessorStats& stats =
645 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
646 NULL, _gc_timer, gc_tracer.gc_id());
647 gc_tracer.report_gc_reference_stats(stats);
648
845 }
846
847 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
848 \
849 void DefNewGeneration:: \
850 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
851 cl->set_generation(this); \
852 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
853 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
854 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
855 cl->reset_generation(); \
856 save_marks(); \
857 }
858
859 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
860
861 #undef DefNew_SINCE_SAVE_MARKS_DEFN
862
863 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
864 size_t max_alloc_words) {
865 if (requestor == this || _promotion_failed) return;
866 assert(requestor->level() > level(), "DefNewGeneration must be youngest");
867
868 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
869 if (to_space->top() > to_space->bottom()) {
870 trace("to_space not empty when contribute_scratch called");
871 }
872 */
873
874 ContiguousSpace* to_space = to();
875 assert(to_space->end() >= to_space->top(), "pointers out of order");
876 size_t free_words = pointer_delta(to_space->end(), to_space->top());
877 if (free_words >= MinFreeScratchWords) {
878 ScratchBlock* sb = (ScratchBlock*)to_space->top();
879 sb->num_words = free_words;
880 sb->next = list;
881 list = sb;
882 }
883 }
884
885 void DefNewGeneration::reset_scratch() {
886 // If contributing scratch in to_space, mangle all of
40 #include "memory/referencePolicy.hpp"
41 #include "memory/space.inline.hpp"
42 #include "oops/instanceRefKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/atomic.inline.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/prefetch.inline.hpp"
47 #include "runtime/thread.inline.hpp"
48 #include "utilities/copy.hpp"
49 #include "utilities/globalDefinitions.hpp"
50 #include "utilities/stack.inline.hpp"
51 #if INCLUDE_ALL_GCS
52 #include "gc_implementation/parNew/parOopClosures.hpp"
53 #endif
54
55 //
56 // DefNewGeneration functions.
57
58 // Methods of protected closure types.
59
60 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* gen) : _gen(gen) {
61 assert(_gen == GenCollectedHeap::heap()->young_gen(), "Expected the young generation here");
62 }
63
64 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
65 return (HeapWord*)p >= _gen->reserved().end() || p->is_forwarded();
66 }
67
68 DefNewGeneration::KeepAliveClosure::
69 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
70 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
71 _rs = (CardTableRS*)rs;
72 }
73
74 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
75 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
76
77
78 DefNewGeneration::FastKeepAliveClosure::
79 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
80 DefNewGeneration::KeepAliveClosure(cl) {
81 _boundary = g->reserved().end();
82 }
83
84 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
85 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
86
87 DefNewGeneration::EvacuateFollowersClosure::
88 EvacuateFollowersClosure(GenCollectedHeap* gch,
89 ScanClosure* cur,
90 ScanClosure* older) :
91 _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
92 {}
93
94 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
95 do {
96 _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap,
97 _scan_older);
98 } while (!_gch->no_allocs_since_save_marks(Generation::Young));
99 }
100
101 DefNewGeneration::FastEvacuateFollowersClosure::
102 FastEvacuateFollowersClosure(GenCollectedHeap* gch,
103 FastScanClosure* cur,
104 FastScanClosure* older) :
105 _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
106 {
107 assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
108 _gen = (DefNewGeneration*)_gch->young_gen();
109 }
110
111 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
112 do {
113 _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older);
114 } while (!_gch->no_allocs_since_save_marks(Generation::Young));
115 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
116 }
117
118 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
119 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
120 {
121 _boundary = _g->reserved().end();
122 }
123
124 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
125 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
126
127 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
128 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
129 {
130 _boundary = _g->reserved().end();
131 }
132
133 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
134 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
135
136 void KlassScanClosure::do_klass(Klass* klass) {
137 #ifndef PRODUCT
138 if (TraceScavenge) {
139 ResourceMark rm;
140 gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
141 p2i(klass),
142 klass->external_name(),
143 klass->has_modified_oops() ? "true" : "false");
144 }
145 #endif
146
147   // If the klass has not been dirtied we know that there are
148   // no references into the young gen and we can skip it.
149 if (klass->has_modified_oops()) {
150 if (_accumulate_modified_oops) {
151 klass->accumulate_modified_oops();
152 }
153
154 // Clear this state since we're going to scavenge all the metadata.
155 klass->clear_modified_oops();
156
157 // Tell the closure which Klass is being scanned so that it can be dirtied
158 // if oops are left pointing into the young gen.
159 _scavenge_closure->set_scanned_klass(klass);
160
161 klass->oops_do(_scavenge_closure);
162
163 _scavenge_closure->set_scanned_klass(NULL);
164 }
165 }
166
167 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
168 _g(g)
169 {
170 _boundary = _g->reserved().end();
171 }
172
173 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
174 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
175
176 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
177 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
178
179 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
180 KlassRemSet* klass_rem_set)
181 : _scavenge_closure(scavenge_closure),
182 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
183
184
185 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
186 size_t initial_size,
187 const char* policy)
188 : Generation(rs, initial_size),
189 _promo_failure_drain_in_progress(false),
190 _should_allocate_from_space(false)
191 {
192 MemRegion cmr((HeapWord*)_virtual_space.low(),
193 (HeapWord*)_virtual_space.high());
194 GenCollectedHeap* gch = GenCollectedHeap::heap();
195
196 gch->barrier_set()->resize_covered_region(cmr);
197
198 _eden_space = new ContiguousSpace();
199 _from_space = new ContiguousSpace();
200 _to_space = new ContiguousSpace();
201
202 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
203 vm_exit_during_initialization("Could not allocate a new gen space");
204
205 // Compute the maximum eden and survivor space sizes. These sizes
206 // are computed assuming the entire reserved space is committed.
207 // These values are exported as performance counters.
208 uintx alignment = gch->collector_policy()->space_alignment();
352 }
353
354 // Do not attempt an expand to the reserve size here. The
355 // request should properly observe the maximum size of
356 // the generation, so an expand-to-reserve should be
357 // unnecessary. Also, a second expand-to-reserve call can
358 // potentially cause an undue expansion: for example, the
359 // first expand might fail for unknown reasons while the
360 // second succeeds and expands the heap to its maximum
361 // value.
362 if (GC_locker::is_active()) {
363 if (PrintGC && Verbose) {
364 gclog_or_tty->print_cr("Garbage collection disabled, "
365 "expanded heap instead");
366 }
367 }
368
369 return success;
370 }
371
372 void DefNewGeneration::compute_new_size() {
373 // This is called after a GC that includes the old generation, so from-space
374 // will normally be empty.
375 // Note that we check both spaces, since if scavenge failed they revert roles.
376 // If not, we bail out (otherwise we would have to relocate the objects).
377 if (!from()->is_empty() || !to()->is_empty()) {
378 return;
379 }
380
381 GenCollectedHeap* gch = GenCollectedHeap::heap();
382
383 size_t old_size = gch->old_gen()->capacity();
384 size_t new_size_before = _virtual_space.committed_size();
385 size_t min_new_size = spec()->init_size();
386 size_t max_new_size = reserved().byte_size();
387 assert(min_new_size <= new_size_before &&
388 new_size_before <= max_new_size,
389 "just checking");
390 // All space sizes must be multiples of Generation::GenGrain.
391 size_t alignment = Generation::GenGrain;
392
393 // Compute desired new generation size based on NewRatio and
394 // NewSizeThreadIncrease
395 size_t desired_new_size = old_size/NewRatio;
396 int threads_count = Threads::number_of_non_daemon_threads();
397 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
398 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
399
400 // Adjust new generation size
401 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
402 assert(desired_new_size <= max_new_size, "just checking");
403
579 }
580 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
581
582 init_assuming_no_promotion_failure();
583
584 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
585 // Capture heap used before collection (for printing).
586 size_t gch_prev_used = gch->used();
587
588 gch->trace_heap_before_gc(&gc_tracer);
589
590 // These can be shared for all code paths
591 IsAliveClosure is_alive(this);
592 ScanWeakRefClosure scan_weak_ref(this);
593
594 age_table()->clear();
595 to()->clear(SpaceDecorator::Mangle);
596
597 gch->rem_set()->prepare_for_younger_refs_iterate(false);
598
599 assert(gch->no_allocs_since_save_marks(Generation::Young),
600 "save marks have not been newly set.");
601
602 // Not very pretty.
603 CollectorPolicy* cp = gch->collector_policy();
604
605 FastScanClosure fsc_with_no_gc_barrier(this, false);
606 FastScanClosure fsc_with_gc_barrier(this, true);
607
608 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
609 gch->rem_set()->klass_rem_set());
610 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
611 &fsc_with_no_gc_barrier,
612 false);
613
614 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
615 FastEvacuateFollowersClosure evacuate_followers(gch,
616 &fsc_with_no_gc_barrier,
617 &fsc_with_gc_barrier);
618
619 assert(gch->no_allocs_since_save_marks(Generation::Young),
620 "save marks have not been newly set.");
621
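  // Two scan closures are passed down: the no-barrier variant for oops
  // scanned within the young generation itself, and the barrier variant for
  // oops found in older generations, where the card table must stay dirty if
  // the scanned field still points into the young gen.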
622 gch->gen_process_roots(Generation::Young,
623 true, // Process younger gens, if any,
624 // as strong roots.
625 true, // activate StrongRootsScope
626 GenCollectedHeap::SO_ScavengeCodeCache,
627 GenCollectedHeap::StrongAndWeakRoots,
628 &fsc_with_no_gc_barrier,
629 &fsc_with_gc_barrier,
630 &cld_scan_closure);
631
632 // "evacuate followers".
633 evacuate_followers.do_void();
634
635 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
636 ReferenceProcessor* rp = ref_processor();
637 rp->setup_policy(clear_all_soft_refs);
638 const ReferenceProcessorStats& stats =
639 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
640 NULL, _gc_timer, gc_tracer.gc_id());
641 gc_tracer.report_gc_reference_stats(stats);
642
839 }
840
841 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
842 \
843 void DefNewGeneration:: \
844 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
845 cl->set_generation(this); \
846 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
847 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
848 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
849 cl->reset_generation(); \
850 save_marks(); \
851 }
852
853 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
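// Illustration only: for a closure type such as ScanClosure (assuming it is
// listed in ALL_SINCE_SAVE_MARKS_CLOSURES with the _nv suffix), the macro
// above expands to roughly
//
//   void DefNewGeneration::oop_since_save_marks_iterate_nv(ScanClosure* cl) {
//     cl->set_generation(this);
//     eden()->oop_since_save_marks_iterate_nv(cl);
//     to()->oop_since_save_marks_iterate_nv(cl);
//     from()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }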
854
855 #undef DefNew_SINCE_SAVE_MARKS_DEFN
856
857 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
858 size_t max_alloc_words) {
859 if (requestor == this || _promotion_failed) {
860 return;
861 }
862 assert(requestor == GenCollectedHeap::heap()->old_gen(), "We should not call our own generation");
863
864 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
865 if (to_space->top() > to_space->bottom()) {
866 trace("to_space not empty when contribute_scratch called");
867 }
868 */
869
870 ContiguousSpace* to_space = to();
871 assert(to_space->end() >= to_space->top(), "pointers out of order");
872 size_t free_words = pointer_delta(to_space->end(), to_space->top());
873 if (free_words >= MinFreeScratchWords) {
874 ScratchBlock* sb = (ScratchBlock*)to_space->top();
875 sb->num_words = free_words;
876 sb->next = list;
877 list = sb;
878 }
879 }
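// The block recorded above lets the requesting (older) generation use the
// unused tail of to-space as temporary scratch space during its collection.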
880
881 void DefNewGeneration::reset_scratch() {
882 // If contributing scratch in to_space, mangle all of