39 #include "memory/iterator.hpp"
40 #include "memory/referencePolicy.hpp"
41 #include "memory/space.inline.hpp"
42 #include "oops/instanceRefKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/atomic.inline.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/prefetch.inline.hpp"
47 #include "runtime/thread.inline.hpp"
48 #include "utilities/copy.hpp"
49 #include "utilities/globalDefinitions.hpp"
50 #include "utilities/stack.inline.hpp"
51
52 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
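// (The pragma above silences printf-style format warnings from the logging
// calls below, e.g. printing a Klass* with PTR_FORMAT.)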
53
54 //
55 // DefNewGeneration functions.
56
57 // Methods of protected closure types.
58
59 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
60 assert(g->level() == 0, "Optimized for youngest gen.");
61 }
62 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
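// An object is alive if it lies beyond the end of the young generation's
// reserved region (and so is not subject to this scavenge) or has already
// been forwarded by the current scavenge.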
63 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
64 }
65
66 DefNewGeneration::KeepAliveClosure::
67 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
68 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
69 _rs = (CardTableRS*)rs;
70 }
71
72 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
73 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
74
75
76 DefNewGeneration::FastKeepAliveClosure::
77 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
78 DefNewGeneration::KeepAliveClosure(cl) {
79 _boundary = g->reserved().end();
80 }
81
82 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
83 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
84
85 DefNewGeneration::EvacuateFollowersClosure::
86 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
87 ScanClosure* cur, ScanClosure* older) :
88 _gch(gch), _level(level),
89 _scan_cur_or_nonheap(cur), _scan_older(older)
90 {}
91
92 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
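// Repeatedly scan objects allocated (copied) since the last save-marks point
// until no further allocations occur, i.e. until the transitive closure of
// reachable objects has been evacuated.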
93 do {
94 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
95 _scan_older);
96 } while (!_gch->no_allocs_since_save_marks(_level));
97 }
98
99 DefNewGeneration::FastEvacuateFollowersClosure::
100 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
101 DefNewGeneration* gen,
102 FastScanClosure* cur, FastScanClosure* older) :
103 _gch(gch), _level(level), _gen(gen),
104 _scan_cur_or_nonheap(cur), _scan_older(older)
105 {}
106
107 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
108 do {
109 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
110 _scan_older);
111 } while (!_gch->no_allocs_since_save_marks(_level));
112 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
113 }
114
115 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
116 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
117 {
118 assert(_g->level() == 0, "Optimized for youngest generation");
119 _boundary = _g->reserved().end();
120 }
121
122 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
123 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
124
125 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
126 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
127 {
128 assert(_g->level() == 0, "Optimized for youngest generation");
129 _boundary = _g->reserved().end();
130 }
131
132 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
133 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
134
135 void KlassScanClosure::do_klass(Klass* klass) {
136 #ifndef PRODUCT
137 if (TraceScavenge) {
138 ResourceMark rm;
139 gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
140 klass,
141 klass->external_name(),
142 klass->has_modified_oops() ? "true" : "false");
143 }
144 #endif
145
146 // If the klass has not been dirtied we know that there are
147 // no references into the young gen and we can skip it.
148 if (klass->has_modified_oops()) {
149 if (_accumulate_modified_oops) {
150 klass->accumulate_modified_oops();
151 }
152
153 // Clear this state since we're going to scavenge all the metadata.
154 klass->clear_modified_oops();
155
156 // Tell the closure which Klass is being scanned so that it can be dirtied
157 // if oops are left pointing into the young gen.
158 _scavenge_closure->set_scanned_klass(klass);
159
160 klass->oops_do(_scavenge_closure);
161
162 _scavenge_closure->set_scanned_klass(NULL);
163 }
164 }
165
166 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
167 _g(g)
168 {
169 assert(_g->level() == 0, "Optimized for youngest generation");
170 _boundary = _g->reserved().end();
171 }
172
173 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
174 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
175
176 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
177 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
178
179 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
180 KlassRemSet* klass_rem_set)
181 : _scavenge_closure(scavenge_closure),
182 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
183
184
185 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
186 size_t initial_size,
187 int level,
188 const char* policy)
189 : Generation(rs, initial_size, level),
190 _promo_failure_drain_in_progress(false),
191 _should_allocate_from_space(false)
192 {
193 MemRegion cmr((HeapWord*)_virtual_space.low(),
194 (HeapWord*)_virtual_space.high());
195 Universe::heap()->barrier_set()->resize_covered_region(cmr);
196
197 if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
198 _eden_space = new ConcEdenSpace(this);
199 } else {
200 _eden_space = new EdenSpace(this);
201 }
202 _from_space = new ContiguousSpace();
203 _to_space = new ContiguousSpace();
204
205 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
206 vm_exit_during_initialization("Could not allocate a new gen space");
207
208 // Compute the maximum eden and survivor space sizes. These sizes
209 // are computed assuming the entire reserved space is committed.
213 _max_survivor_size = compute_survivor_size(size, alignment);
214 _max_eden_size = size - (2*_max_survivor_size);
215
216 // allocate the performance counters
217 GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
218
219 // Generation counters -- generation 0, 3 subspaces
220 _gen_counters = new GenerationCounters("new", 0, 3,
221 gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
222 _gc_counters = new CollectorCounters(policy, 0);
223
224 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
225 _gen_counters);
226 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
227 _gen_counters);
228 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
229 _gen_counters);
230
231 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
232 update_counters();
233 _next_gen = NULL;
234 _tenuring_threshold = MaxTenuringThreshold;
235 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
236
237 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
238 }
239
240 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
241 bool clear_space,
242 bool mangle_space) {
243 uintx alignment =
244 GenCollectedHeap::heap()->collector_policy()->space_alignment();
245
246 // If the spaces are being cleared (only done at heap initialization
247 // currently), the survivor spaces need not be empty.
249 // Otherwise, no care is taken for used areas in the survivor spaces,
250 // so check that they are empty.
250 assert(clear_space || (to()->is_empty() && from()->is_empty()),
251 "Initialization of the survivor spaces assumes these are empty");
252
253 // Compute sizes
365 if (GC_locker::is_active()) {
366 if (PrintGC && Verbose) {
367 gclog_or_tty->print_cr("Garbage collection disabled, "
368 "expanded heap instead");
369 }
370 }
371
372 return success;
373 }
374
375
376 void DefNewGeneration::compute_new_size() {
377 // This is called after a gc that includes the following generation
378 // (which is required to exist), so from-space will normally be empty.
379 // Note that we check both spaces, since if the scavenge failed they swap roles.
380 // If they are not empty we bail out (otherwise we would have to relocate the objects).
381 if (!from()->is_empty() || !to()->is_empty()) {
382 return;
383 }
384
385 int next_level = level() + 1;
386 GenCollectedHeap* gch = GenCollectedHeap::heap();
387 assert(next_level < gch->_n_gens,
388 "DefNewGeneration cannot be an oldest gen");
389
390 Generation* next_gen = gch->_gens[next_level];
391 size_t old_size = next_gen->capacity();
392 size_t new_size_before = _virtual_space.committed_size();
393 size_t min_new_size = spec()->init_size();
394 size_t max_new_size = reserved().byte_size();
395 assert(min_new_size <= new_size_before &&
396 new_size_before <= max_new_size,
397 "just checking");
398 // All space sizes must be multiples of Generation::GenGrain.
399 size_t alignment = Generation::GenGrain;
400
401 // Compute desired new generation size based on NewRatio and
402 // NewSizeThreadIncrease
403 size_t desired_new_size = old_size/NewRatio;
404 int threads_count = Threads::number_of_non_daemon_threads();
405 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
406 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
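// Illustrative example (assumed values): with a 512M old generation, NewRatio=2,
// 10 non-daemon threads and NewSizeThreadIncrease=16K, this requests roughly
// 256M + 160K, rounded up to the GenGrain alignment.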
407
408 // Adjust new generation size
409 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
410 assert(desired_new_size <= max_new_size, "just checking");
411
555 }
556
557 void DefNewGeneration::adjust_desired_tenuring_threshold() {
558 // Set the desired survivor size to half the real survivor space
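// ("half" reflects the default TargetSurvivorRatio of 50; compute_tenuring_threshold()
// targets TargetSurvivorRatio percent of the survivor capacity.)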
559 _tenuring_threshold =
560 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
561 }
562
563 void DefNewGeneration::collect(bool full,
564 bool clear_all_soft_refs,
565 size_t size,
566 bool is_tlab) {
567 assert(full || size > 0, "otherwise we don't want to collect");
568
569 GenCollectedHeap* gch = GenCollectedHeap::heap();
570
571 _gc_timer->register_gc_start();
572 DefNewTracer gc_tracer;
573 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
574
575 _next_gen = gch->next_gen(this);
576
577 // If the next generation is too full to accommodate promotion
578 // from this generation, pass on collection; let the next generation
579 // do it.
580 if (!collection_attempt_is_safe()) {
581 if (Verbose && PrintGCDetails) {
582 gclog_or_tty->print(" :: Collection attempt not safe :: ");
583 }
584 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
585 return;
586 }
587 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
588
589 init_assuming_no_promotion_failure();
590
591 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
592 // Capture heap used before collection (for printing).
593 size_t gch_prev_used = gch->used();
594
595 gch->trace_heap_before_gc(&gc_tracer);
596
597 SpecializationStats::clear();
598
599 // These can be shared for all code paths
600 IsAliveClosure is_alive(this);
601 ScanWeakRefClosure scan_weak_ref(this);
602
603 age_table()->clear();
604 to()->clear(SpaceDecorator::Mangle);
605
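// Prepare the remembered set (card table) for the scan of old-to-young
// references; the argument indicates this is not a parallel scan.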
606 gch->rem_set()->prepare_for_younger_refs_iterate(false);
607
608 assert(gch->no_allocs_since_save_marks(0),
609 "save marks have not been newly set.");
610
611 // Not very pretty.
612 CollectorPolicy* cp = gch->collector_policy();
613
614 FastScanClosure fsc_with_no_gc_barrier(this, false);
615 FastScanClosure fsc_with_gc_barrier(this, true);
616
617 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
618 gch->rem_set()->klass_rem_set());
619 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
620 &fsc_with_no_gc_barrier,
621 false);
622
623 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
624 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
625 &fsc_with_no_gc_barrier,
626 &fsc_with_gc_barrier);
627
628 assert(gch->no_allocs_since_save_marks(0),
629 "save marks have not been newly set.");
630
631 gch->gen_process_roots(_level,
632 true, // Process younger gens, if any,
633 // as strong roots.
634 true, // activate StrongRootsScope
635 SharedHeap::SO_ScavengeCodeCache,
636 GenCollectedHeap::StrongAndWeakRoots,
637 &fsc_with_no_gc_barrier,
638 &fsc_with_gc_barrier,
639 &cld_scan_closure);
640
641 // "evacuate followers".
642 evacuate_followers.do_void();
643
644 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
645 ReferenceProcessor* rp = ref_processor();
646 rp->setup_policy(clear_all_soft_refs);
647 const ReferenceProcessorStats& stats =
648 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
649 NULL, _gc_timer, gc_tracer.gc_id());
650 gc_tracer.report_gc_reference_stats(stats);
651
675 size_policy->reset_gc_overhead_limit_count();
676 assert(!gch->incremental_collection_failed(), "Should be clear");
677 } else {
678 assert(_promo_failure_scan_stack.is_empty(), "post condition");
679 _promo_failure_scan_stack.clear(true); // Clear cached segments.
680
681 remove_forwarding_pointers();
682 if (PrintGCDetails) {
683 gclog_or_tty->print(" (promotion failed) ");
684 }
685 // Add to-space to the list of spaces to compact
686 // when a promotion failure has occurred. In that
687 // case there can be live objects in to-space
688 // as a result of a partial evacuation of eden
689 // and from-space.
690 swap_spaces(); // For uniformity wrt ParNewGeneration.
691 from()->set_next_compaction_space(to());
692 gch->set_incremental_collection_failed();
693
694 // Inform the next generation that a promotion failure occurred.
695 _next_gen->promotion_failure_occurred();
696 gc_tracer.report_promotion_failed(_promotion_failed_info);
697
698 // Reset the PromotionFailureALot counters.
699 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
700 }
701 if (PrintGC && !PrintGCDetails) {
702 gch->print_heap_change(gch_prev_used);
703 }
704 // set new iteration safe limit for the survivor spaces
705 from()->set_concurrent_iteration_safe_limit(from()->top());
706 to()->set_concurrent_iteration_safe_limit(to()->top());
707 SpecializationStats::print();
708
709 // We need to use a monotonically non-decreasing time in ms,
710 // or we will see time-warp warnings; os::javaTimeMillis()
711 // does not guarantee monotonicity.
712 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
713 update_time_of_last_gc(now);
714
715 gch->trace_heap_after_gc(&gc_tracer);
780 // prevent recursion in copy_to_survivor_space()
781 _promo_failure_drain_in_progress = true;
782 drain_promo_failure_scan_stack();
783 _promo_failure_drain_in_progress = false;
784 }
785 }
786
787 oop DefNewGeneration::copy_to_survivor_space(oop old) {
788 assert(is_in_reserved(old) && !old->is_forwarded(),
789 "shouldn't be scavenging this oop");
790 size_t s = old->size();
791 oop obj = NULL;
792
793 // Try allocating obj in to-space (unless too old)
794 if (old->age() < tenuring_threshold()) {
795 obj = (oop) to()->allocate_aligned(s);
796 }
797
798 // Otherwise try allocating obj tenured
799 if (obj == NULL) {
800 obj = _next_gen->promote(old, s);
801 if (obj == NULL) {
802 handle_promotion_failure(old);
803 return old;
804 }
805 } else {
806 // Prefetch beyond obj
807 const intx interval = PrefetchCopyIntervalInBytes;
808 Prefetch::write(obj, interval);
809
810 // Copy obj
811 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
812
813 // Increment age if obj still in new generation
814 obj->incr_age();
815 age_table()->add(obj, s);
816 }
817
818 // Done, insert forward pointer to obj in the old object's header
819 old->forward_to(obj);
820
849 }
850
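// Generate the type-specialized oop_since_save_marks_iterate methods: each one
// scans eden, to-space and from-space objects allocated since the last
// save-marks point with the given closure, then advances the saved marks.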
851 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
852 \
853 void DefNewGeneration:: \
854 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
855 cl->set_generation(this); \
856 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
857 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
858 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
859 cl->reset_generation(); \
860 save_marks(); \
861 }
862
863 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
864
865 #undef DefNew_SINCE_SAVE_MARKS_DEFN
866
867 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
868 size_t max_alloc_words) {
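// Offer the unused tail of to-space as a scratch block to an older generation
// that is gathering scratch space; skip this if we are the requestor or a
// promotion failure left live objects in to-space.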
869 if (requestor == this || _promotion_failed) return;
870 assert(requestor->level() > level(), "DefNewGeneration must be youngest");
871
872 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
873 if (to_space->top() > to_space->bottom()) {
874 trace("to_space not empty when contribute_scratch called");
875 }
876 */
877
878 ContiguousSpace* to_space = to();
879 assert(to_space->end() >= to_space->top(), "pointers out of order");
880 size_t free_words = pointer_delta(to_space->end(), to_space->top());
881 if (free_words >= MinFreeScratchWords) {
882 ScratchBlock* sb = (ScratchBlock*)to_space->top();
883 sb->num_words = free_words;
884 sb->next = list;
885 list = sb;
886 }
887 }
888
889 void DefNewGeneration::reset_scratch() {
890 // If contributing scratch in to_space, mangle all of
891 // to_space if ZapUnusedHeapArea. This is needed because
892 // top is not maintained while using to-space as scratch.
893 if (ZapUnusedHeapArea) {
894 to()->mangle_unused_area_complete();
895 }
896 }
897
898 bool DefNewGeneration::collection_attempt_is_safe() {
899 if (!to()->is_empty()) {
900 if (Verbose && PrintGCDetails) {
901 gclog_or_tty->print(" :: to is not empty :: ");
902 }
903 return false;
904 }
905 if (_next_gen == NULL) {
906 GenCollectedHeap* gch = GenCollectedHeap::heap();
907 _next_gen = gch->next_gen(this);
908 }
909 return _next_gen->promotion_attempt_is_safe(used());
910 }
911
912 void DefNewGeneration::gc_epilogue(bool full) {
913 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
914
915 assert(!GC_locker::is_active(), "We should not be executing here");
916 // Check if the heap is approaching full after a collection has
917 // been done. Generally the young generation is empty, at a minimum,
918 // at the end of a collection. If it is not, then
919 // the heap is approaching full.
920 GenCollectedHeap* gch = GenCollectedHeap::heap();
921 if (full) {
922 DEBUG_ONLY(seen_incremental_collection_failed = false;)
923 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
924 if (Verbose && PrintGCDetails) {
925 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
926 GCCause::to_string(gch->gc_cause()));
927 }
928 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
929 set_should_allocate_from_space(); // we seem to be running out of space
1009 void DefNewGeneration::print_on(outputStream* st) const {
1010 Generation::print_on(st);
1011 st->print(" eden");
1012 eden()->print_on(st);
1013 st->print(" from");
1014 from()->print_on(st);
1015 st->print(" to ");
1016 to()->print_on(st);
1017 }
1018
1019
1020 const char* DefNewGeneration::name() const {
1021 return "def new generation";
1022 }
1023
1024 // Moved from inline file as they are not called inline
1025 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
1026 return eden();
1027 }
1028
1029 HeapWord* DefNewGeneration::allocate(size_t word_size,
1030 bool is_tlab) {
1031 // This is the slow-path allocation for the DefNewGeneration.
1032 // Most allocations are fast-path in compiled code.
1033 // We try to allocate from the eden. If that works, we are happy.
1034 // Note that since DefNewGeneration supports lock-free allocation, we
1035 // have to use it here, as well.
1036 HeapWord* result = eden()->par_allocate(word_size);
1037 if (result != NULL) {
1038 if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1039 _next_gen->sample_eden_chunk();
1040 }
1041 return result;
1042 }
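// Eden allocation failed. If eden has a soft end below its hard end (see
// has_soft_ended_eden() above), ask the next generation whether the soft limit
// may be pushed out, then retry the allocation.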
1043 do {
1044 HeapWord* old_limit = eden()->soft_end();
1045 if (old_limit < eden()->end()) {
1046 // Tell the next generation we reached a limit.
1047 HeapWord* new_limit =
1048 next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
1049 if (new_limit != NULL) {
1050 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
1051 } else {
1052 assert(eden()->soft_end() == eden()->end(),
1053 "invalid state after allocation_limit_reached returned null");
1054 }
1055 } else {
1056 // The allocation failed and the soft limit is equal to the hard limit,
1057 // so there is no reason to attempt another allocation.
1058 assert(old_limit == eden()->end(), "sanity check");
1059 break;
1060 }
1061 // Try to allocate until we succeed or the soft limit can't be adjusted
1062 result = eden()->par_allocate(word_size);
1063 } while (result == NULL);
1064
1065 // If the eden is full and the last collection bailed out, we are running
1066 // out of heap space, and we try to allocate in from-space, too.
1067 // allocate_from_space can't be inlined because that would introduce a
1068 // circular dependency at compile time.
1069 if (result == NULL) {
1070 result = allocate_from_space(word_size);
1071 } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1072 _next_gen->sample_eden_chunk();
1073 }
1074 return result;
1075 }
1076
1077 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1078 bool is_tlab) {
1079 HeapWord* res = eden()->par_allocate(word_size);
1080 if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1081 _next_gen->sample_eden_chunk();
1082 }
1083 return res;
1084 }
1085
1086 void DefNewGeneration::gc_prologue(bool full) {
1087 // Ensure that _end and _soft_end are the same in eden space.
1088 eden()->set_soft_end(eden()->end());
1089 }
1090
1091 size_t DefNewGeneration::tlab_capacity() const {
1092 return eden()->capacity();
1093 }
1094
1095 size_t DefNewGeneration::tlab_used() const {
1096 return eden()->used();
1097 }
1098
1099 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1100 return unsafe_max_alloc_nogc();
1101 }
39 #include "memory/iterator.hpp"
40 #include "memory/referencePolicy.hpp"
41 #include "memory/space.inline.hpp"
42 #include "oops/instanceRefKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/atomic.inline.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/prefetch.inline.hpp"
47 #include "runtime/thread.inline.hpp"
48 #include "utilities/copy.hpp"
49 #include "utilities/globalDefinitions.hpp"
50 #include "utilities/stack.inline.hpp"
51
52 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
53
54 //
55 // DefNewGeneration functions.
56
57 // Methods of protected closure types.
58
59 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { }
60 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
61 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
62 }
63
64 DefNewGeneration::KeepAliveClosure::
65 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
66 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
67 _rs = (CardTableRS*)rs;
68 }
69
70 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
71 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
72
73
74 DefNewGeneration::FastKeepAliveClosure::
75 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
76 DefNewGeneration::KeepAliveClosure(cl) {
77 _boundary = g->reserved().end();
78 }
79
80 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
81 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
82
83 DefNewGeneration::EvacuateFollowersClosure::
84 EvacuateFollowersClosure(GenCollectedHeap* gch,
85 ScanClosure* cur, ScanClosure* older) :
86 _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
87 {}
88
89 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
90 do {
91 _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap,
92 _scan_older);
93 } while (!_gch->no_allocs_since_save_marks(Generation::Young));
94 }
95
96 DefNewGeneration::FastEvacuateFollowersClosure::
97 FastEvacuateFollowersClosure(GenCollectedHeap* gch,
98 DefNewGeneration* gen,
99 FastScanClosure* cur, FastScanClosure* older) :
100 _gch(gch), _gen(gen), _scan_cur_or_nonheap(cur), _scan_older(older)
101 {}
102
103 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
104 do {
105 _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap,
106 _scan_older);
107 } while (!_gch->no_allocs_since_save_marks(Generation::Young));
108 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
109 }
110
111 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
112 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
113 {
114 _boundary = _g->reserved().end();
115 }
116
117 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
118 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
119
120 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
121 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
122 {
123 _boundary = _g->reserved().end();
124 }
125
126 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
127 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
128
129 void KlassScanClosure::do_klass(Klass* klass) {
130 #ifndef PRODUCT
131 if (TraceScavenge) {
132 ResourceMark rm;
133 gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
134 klass,
135 klass->external_name(),
136 klass->has_modified_oops() ? "true" : "false");
137 }
138 #endif
139
140 // If the klass has not been dirtied we know that there are
141 // no references into the young gen and we can skip it.
142 if (klass->has_modified_oops()) {
143 if (_accumulate_modified_oops) {
144 klass->accumulate_modified_oops();
145 }
146
147 // Clear this state since we're going to scavenge all the metadata.
148 klass->clear_modified_oops();
149
150 // Tell the closure which Klass is being scanned so that it can be dirtied
151 // if oops are left pointing into the young gen.
152 _scavenge_closure->set_scanned_klass(klass);
153
154 klass->oops_do(_scavenge_closure);
155
156 _scavenge_closure->set_scanned_klass(NULL);
157 }
158 }
159
160 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
161 _g(g)
162 {
163 _boundary = _g->reserved().end();
164 }
165
166 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
167 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
168
169 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
170 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
171
172 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
173 KlassRemSet* klass_rem_set)
174 : _scavenge_closure(scavenge_closure),
175 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
176
177
178 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
179 size_t initial_size,
180 const char* policy)
181 : Generation(rs, initial_size),
182 _promo_failure_drain_in_progress(false),
183 _should_allocate_from_space(false)
184 {
185 MemRegion cmr((HeapWord*)_virtual_space.low(),
186 (HeapWord*)_virtual_space.high());
187 Universe::heap()->barrier_set()->resize_covered_region(cmr);
188
189 if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
190 _eden_space = new ConcEdenSpace(this);
191 } else {
192 _eden_space = new EdenSpace(this);
193 }
194 _from_space = new ContiguousSpace();
195 _to_space = new ContiguousSpace();
196
197 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
198 vm_exit_during_initialization("Could not allocate a new gen space");
199
200 // Compute the maximum eden and survivor space sizes. These sizes
201 // are computed assuming the entire reserved space is committed.
205 _max_survivor_size = compute_survivor_size(size, alignment);
206 _max_eden_size = size - (2*_max_survivor_size);
207
208 // allocate the performance counters
209 GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
210
211 // Generation counters -- generation 0, 3 subspaces
212 _gen_counters = new GenerationCounters("new", 0, 3,
213 gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
214 _gc_counters = new CollectorCounters(policy, 0);
215
216 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
217 _gen_counters);
218 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
219 _gen_counters);
220 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
221 _gen_counters);
222
223 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
224 update_counters();
225 _old_gen = NULL;
226 _tenuring_threshold = MaxTenuringThreshold;
227 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
228
229 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
230 }
231
232 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
233 bool clear_space,
234 bool mangle_space) {
235 uintx alignment =
236 GenCollectedHeap::heap()->collector_policy()->space_alignment();
237
238 // If the spaces are being cleared (only done at heap initialization
239 // currently), the survivor spaces need not be empty.
240 // Otherwise, no care is taken for used areas in the survivor spaces,
241 // so check that they are empty.
242 assert(clear_space || (to()->is_empty() && from()->is_empty()),
243 "Initialization of the survivor spaces assumes these are empty");
244
245 // Compute sizes
357 if (GC_locker::is_active()) {
358 if (PrintGC && Verbose) {
359 gclog_or_tty->print_cr("Garbage collection disabled, "
360 "expanded heap instead");
361 }
362 }
363
364 return success;
365 }
366
367
368 void DefNewGeneration::compute_new_size() {
369 // This is called after a gc that includes the following generation
370 // (which is required to exist), so from-space will normally be empty.
371 // Note that we check both spaces, since if the scavenge failed they swap roles.
372 // If they are not empty we bail out (otherwise we would have to relocate the objects).
373 if (!from()->is_empty() || !to()->is_empty()) {
374 return;
375 }
376
377 GenCollectedHeap* gch = GenCollectedHeap::heap();
378
379 size_t old_size = gch->old_gen()->capacity();
380 size_t new_size_before = _virtual_space.committed_size();
381 size_t min_new_size = spec()->init_size();
382 size_t max_new_size = reserved().byte_size();
383 assert(min_new_size <= new_size_before &&
384 new_size_before <= max_new_size,
385 "just checking");
386 // All space sizes must be multiples of Generation::GenGrain.
387 size_t alignment = Generation::GenGrain;
388
389 // Compute desired new generation size based on NewRatio and
390 // NewSizeThreadIncrease
391 size_t desired_new_size = old_size/NewRatio;
392 int threads_count = Threads::number_of_non_daemon_threads();
393 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
394 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
395
396 // Adjust new generation size
397 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
398 assert(desired_new_size <= max_new_size, "just checking");
399
543 }
544
545 void DefNewGeneration::adjust_desired_tenuring_threshold() {
546 // Set the desired survivor size to half the real survivor space
547 _tenuring_threshold =
548 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
549 }
550
551 void DefNewGeneration::collect(bool full,
552 bool clear_all_soft_refs,
553 size_t size,
554 bool is_tlab) {
555 assert(full || size > 0, "otherwise we don't want to collect");
556
557 GenCollectedHeap* gch = GenCollectedHeap::heap();
558
559 _gc_timer->register_gc_start();
560 DefNewTracer gc_tracer;
561 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
562
563 _old_gen = gch->old_gen();
564
565 // If the next generation is too full to accommodate promotion
566 // from this generation, pass on collection; let the next generation
567 // do it.
568 if (!collection_attempt_is_safe()) {
569 if (Verbose && PrintGCDetails) {
570 gclog_or_tty->print(" :: Collection attempt not safe :: ");
571 }
572 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
573 return;
574 }
575 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
576
577 init_assuming_no_promotion_failure();
578
579 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
580 // Capture heap used before collection (for printing).
581 size_t gch_prev_used = gch->used();
582
583 gch->trace_heap_before_gc(&gc_tracer);
584
585 SpecializationStats::clear();
586
587 // These can be shared for all code paths
588 IsAliveClosure is_alive(this);
589 ScanWeakRefClosure scan_weak_ref(this);
590
591 age_table()->clear();
592 to()->clear(SpaceDecorator::Mangle);
593
594 gch->rem_set()->prepare_for_younger_refs_iterate(false);
595
596 assert(gch->no_allocs_since_save_marks(Generation::Young),
597 "save marks have not been newly set.");
598
599 // Not very pretty.
600 CollectorPolicy* cp = gch->collector_policy();
601
602 FastScanClosure fsc_with_no_gc_barrier(this, false);
603 FastScanClosure fsc_with_gc_barrier(this, true);
604
605 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
606 gch->rem_set()->klass_rem_set());
607 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
608 &fsc_with_no_gc_barrier,
609 false);
610
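// Remember the closure used to drain the promotion-failure scan stack, should
// promotion into the old generation fail during this scavenge.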
611 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
612 FastEvacuateFollowersClosure evacuate_followers(gch, this,
613 &fsc_with_no_gc_barrier,
614 &fsc_with_gc_barrier);
615
616 assert(gch->no_allocs_since_save_marks(Generation::Young),
617 "save marks have not been newly set.");
618
619 gch->gen_process_roots(Generation::Young,
620 true, // Process younger gens, if any,
621 // as strong roots.
622 true, // activate StrongRootsScope
623 SharedHeap::SO_ScavengeCodeCache,
624 GenCollectedHeap::StrongAndWeakRoots,
625 &fsc_with_no_gc_barrier,
626 &fsc_with_gc_barrier,
627 &cld_scan_closure);
628
629 // "evacuate followers".
630 evacuate_followers.do_void();
631
632 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
633 ReferenceProcessor* rp = ref_processor();
634 rp->setup_policy(clear_all_soft_refs);
635 const ReferenceProcessorStats& stats =
636 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
637 NULL, _gc_timer, gc_tracer.gc_id());
638 gc_tracer.report_gc_reference_stats(stats);
639
663 size_policy->reset_gc_overhead_limit_count();
664 assert(!gch->incremental_collection_failed(), "Should be clear");
665 } else {
666 assert(_promo_failure_scan_stack.is_empty(), "post condition");
667 _promo_failure_scan_stack.clear(true); // Clear cached segments.
668
669 remove_forwarding_pointers();
670 if (PrintGCDetails) {
671 gclog_or_tty->print(" (promotion failed) ");
672 }
673 // Add to-space to the list of spaces to compact
674 // when a promotion failure has occurred. In that
675 // case there can be live objects in to-space
676 // as a result of a partial evacuation of eden
677 // and from-space.
678 swap_spaces(); // For uniformity wrt ParNewGeneration.
679 from()->set_next_compaction_space(to());
680 gch->set_incremental_collection_failed();
681
682 // Inform the next generation that a promotion failure occurred.
683 _old_gen->promotion_failure_occurred();
684 gc_tracer.report_promotion_failed(_promotion_failed_info);
685
686 // Reset the PromotionFailureALot counters.
687 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
688 }
689 if (PrintGC && !PrintGCDetails) {
690 gch->print_heap_change(gch_prev_used);
691 }
692 // set new iteration safe limit for the survivor spaces
693 from()->set_concurrent_iteration_safe_limit(from()->top());
694 to()->set_concurrent_iteration_safe_limit(to()->top());
695 SpecializationStats::print();
696
697 // We need to use a monotonically non-decreasing time in ms,
698 // or we will see time-warp warnings; os::javaTimeMillis()
699 // does not guarantee monotonicity.
700 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
701 update_time_of_last_gc(now);
702
703 gch->trace_heap_after_gc(&gc_tracer);
768 // prevent recursion in copy_to_survivor_space()
769 _promo_failure_drain_in_progress = true;
770 drain_promo_failure_scan_stack();
771 _promo_failure_drain_in_progress = false;
772 }
773 }
774
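// Copy a live object into to-space if it is young enough, otherwise promote it
// into the old generation. If promotion fails the object is kept in place,
// self-forwarded, and queued for later scanning by handle_promotion_failure().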
775 oop DefNewGeneration::copy_to_survivor_space(oop old) {
776 assert(is_in_reserved(old) && !old->is_forwarded(),
777 "shouldn't be scavenging this oop");
778 size_t s = old->size();
779 oop obj = NULL;
780
781 // Try allocating obj in to-space (unless too old)
782 if (old->age() < tenuring_threshold()) {
783 obj = (oop) to()->allocate_aligned(s);
784 }
785
786 // Otherwise try allocating obj tenured
787 if (obj == NULL) {
788 obj = _old_gen->promote(old, s);
789 if (obj == NULL) {
790 handle_promotion_failure(old);
791 return old;
792 }
793 } else {
794 // Prefetch beyond obj
795 const intx interval = PrefetchCopyIntervalInBytes;
796 Prefetch::write(obj, interval);
797
798 // Copy obj
799 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
800
801 // Increment age if obj still in new generation
802 obj->incr_age();
803 age_table()->add(obj, s);
804 }
805
806 // Done, insert forward pointer to obj in the old object's header
807 old->forward_to(obj);
808
837 }
838
839 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
840 \
841 void DefNewGeneration:: \
842 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
843 cl->set_generation(this); \
844 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
845 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
846 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
847 cl->reset_generation(); \
848 save_marks(); \
849 }
850
851 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
852
853 #undef DefNew_SINCE_SAVE_MARKS_DEFN
854
855 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
856 size_t max_alloc_words) {
857 if (requestor == this || _promotion_failed) {
858 return;
859 }
860 assert(requestor == GenCollectedHeap::heap()->old_gen(), "We should not call our own generation");
861
862 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
863 if (to_space->top() > to_space->bottom()) {
864 trace("to_space not empty when contribute_scratch called");
865 }
866 */
867
868 ContiguousSpace* to_space = to();
869 assert(to_space->end() >= to_space->top(), "pointers out of order");
870 size_t free_words = pointer_delta(to_space->end(), to_space->top());
871 if (free_words >= MinFreeScratchWords) {
872 ScratchBlock* sb = (ScratchBlock*)to_space->top();
873 sb->num_words = free_words;
874 sb->next = list;
875 list = sb;
876 }
877 }
878
879 void DefNewGeneration::reset_scratch() {
880 // If contributing scratch in to_space, mangle all of
881 // to_space if ZapUnusedHeapArea. This is needed because
882 // top is not maintained while using to-space as scratch.
883 if (ZapUnusedHeapArea) {
884 to()->mangle_unused_area_complete();
885 }
886 }
887
888 bool DefNewGeneration::collection_attempt_is_safe() {
889 if (!to()->is_empty()) {
890 if (Verbose && PrintGCDetails) {
891 gclog_or_tty->print(" :: to is not empty :: ");
892 }
893 return false;
894 }
895 if (_old_gen == NULL) {
896 GenCollectedHeap* gch = GenCollectedHeap::heap();
897 _old_gen = gch->old_gen();
898 }
899 return _old_gen->promotion_attempt_is_safe(used());
900 }
901
902 void DefNewGeneration::gc_epilogue(bool full) {
903 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
904
905 assert(!GC_locker::is_active(), "We should not be executing here");
906 // Check if the heap is approaching full after a collection has
907 // been done. Generally the young generation is empty, at a minimum,
908 // at the end of a collection. If it is not, then
909 // the heap is approaching full.
910 GenCollectedHeap* gch = GenCollectedHeap::heap();
911 if (full) {
912 DEBUG_ONLY(seen_incremental_collection_failed = false;)
913 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
914 if (Verbose && PrintGCDetails) {
915 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
916 GCCause::to_string(gch->gc_cause()));
917 }
918 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
919 set_should_allocate_from_space(); // we seem to be running out of space
999 void DefNewGeneration::print_on(outputStream* st) const {
1000 Generation::print_on(st);
1001 st->print(" eden");
1002 eden()->print_on(st);
1003 st->print(" from");
1004 from()->print_on(st);
1005 st->print(" to ");
1006 to()->print_on(st);
1007 }
1008
1009
1010 const char* DefNewGeneration::name() const {
1011 return "def new generation";
1012 }
1013
1014 // Moved from inline file as they are not called inline
1015 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
1016 return eden();
1017 }
1018
1019 HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
1020 // This is the slow-path allocation for the DefNewGeneration.
1021 // Most allocations are fast-path in compiled code.
1022 // We try to allocate from the eden. If that works, we are happy.
1023 // Note that since DefNewGeneration supports lock-free allocation, we
1024 // have to use it here, as well.
1025 HeapWord* result = eden()->par_allocate(word_size);
1026 if (result != NULL) {
1027 if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
1028 _old_gen->sample_eden_chunk();
1029 }
1030 return result;
1031 }
1032 do {
1033 HeapWord* old_limit = eden()->soft_end();
1034 if (old_limit < eden()->end()) {
1035 // Tell the old generation we reached a limit.
1036 HeapWord* new_limit =
1037 _old_gen->allocation_limit_reached(eden(), eden()->top(), word_size);
1038 if (new_limit != NULL) {
1039 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
1040 } else {
1041 assert(eden()->soft_end() == eden()->end(),
1042 "invalid state after allocation_limit_reached returned null");
1043 }
1044 } else {
1045 // The allocation failed and the soft limit is equal to the hard limit,
1046 // so there is no reason to attempt another allocation.
1047 assert(old_limit == eden()->end(), "sanity check");
1048 break;
1049 }
1050 // Try to allocate until we succeed or the soft limit can't be adjusted
1051 result = eden()->par_allocate(word_size);
1052 } while (result == NULL);
1053
1054 // If the eden is full and the last collection bailed out, we are running
1055 // out of heap space, and we try to allocate in from-space, too.
1056 // allocate_from_space can't be inlined because that would introduce a
1057 // circular dependency at compile time.
1058 if (result == NULL) {
1059 result = allocate_from_space(word_size);
1060 } else if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
1061 _old_gen->sample_eden_chunk();
1062 }
1063 return result;
1064 }
1065
1066 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1067 bool is_tlab) {
1068 HeapWord* res = eden()->par_allocate(word_size);
1069 if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
1070 _old_gen->sample_eden_chunk();
1071 }
1072 return res;
1073 }
1074
1075 void DefNewGeneration::gc_prologue(bool full) {
1076 // Ensure that _end and _soft_end are the same in eden space.
1077 eden()->set_soft_end(eden()->end());
1078 }
1079
1080 size_t DefNewGeneration::tlab_capacity() const {
1081 return eden()->capacity();
1082 }
1083
1084 size_t DefNewGeneration::tlab_used() const {
1085 return eden()->used();
1086 }
1087
1088 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1089 return unsafe_max_alloc_nogc();
1090 }