24
25 #include "precompiled.hpp"
26 #include "gc_implementation/shared/collectorCounters.hpp"
27 #include "gc_implementation/shared/gcTimer.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/blockOffsetTable.inline.hpp"
30 #include "memory/cardGeneration.inline.hpp"
31 #include "memory/generationSpec.hpp"
32 #include "memory/genMarkSweep.hpp"
33 #include "memory/genOopClosures.inline.hpp"
34 #include "memory/space.hpp"
35 #include "memory/tenuredGeneration.inline.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/java.hpp"
38 #include "utilities/macros.hpp"
39 #if INCLUDE_ALL_GCS
40 #include "gc_implementation/parNew/parOopClosures.hpp"
41 #endif
42
43 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
44 size_t initial_byte_size, int level,
45 GenRemSet* remset) :
46 CardGeneration(rs, initial_byte_size, level, remset)
47 {
48 HeapWord* bottom = (HeapWord*) _virtual_space.low();
49 HeapWord* end = (HeapWord*) _virtual_space.high();
50 _the_space = new TenuredSpace(_bts, MemRegion(bottom, end));
51 _the_space->reset_saved_mark();
52 _shrink_factor = 0;
53 _capacity_at_prologue = 0;
54
55 _gc_stats = new GCStats();
56
57 // initialize performance counters
58
59 const char* gen_name = "old";
60 GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
61
62 // Generation Counters -- generation 1, 1 subspace
63 _gen_counters = new GenerationCounters(gen_name, 1, 1,
64 gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
65
66 _gc_counters = new CollectorCounters("MSC", 1);
117 _capacity_at_prologue, capacity());
118 }
119 }
120 return result;
121 }
122
123 void TenuredGeneration::compute_new_size() {
124   assert_locked_or_safepoint(Heap_lock);
125 
126   // Remember how much was in use before resizing so we can verify,
127   // below, that resizing never discards live data.
128   const size_t used_after_gc = used();
129 
130   // Delegate the actual expand/shrink policy to CardGeneration.
131   CardGeneration::compute_new_size();
132 
133   // Resizing must not change used() and must leave enough capacity
134   // for everything that is in use.
135   assert(used() == used_after_gc && used_after_gc <= capacity(),
136     err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
137     " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
138 }
136
137 void TenuredGeneration::update_gc_stats(int current_level,
138                                         bool full) {
139   // Promotion statistics are only gathered after a non-full
140   // collection of the generation directly below this one.
141   bool next_lower_collected = ((current_level + 1) == level());
142   if (full || !next_lower_collected) {
143     return;
144   }
145 
146   // Size of this generation before the (old-gen) collection runs;
147   // growth since the prologue is the volume promoted into us.
148   size_t used_now = used();
149 
150   // If the younger gen collections were skipped, the number of
151   // promoted bytes will be 0 and sampling it would incorrectly
152   // lessen the average. It is, however, also possible that no
153   // promotion was needed, in which case a 0 sample is legitimate.
154   if (used_now >= _used_at_prologue) {
155     gc_stats()->avg_promoted()->sample(used_now - _used_at_prologue);
156   }
157 }
156
157 void TenuredGeneration::update_counters() {
158 if (UsePerfData) {
159 _space_counters->update_all();
160 _gen_counters->update_all();
161 }
175 return res;
176 }
177
178 void TenuredGeneration::collect(bool full,
179                                 bool clear_all_soft_refs,
180                                 size_t size,
181                                 bool is_tlab) {
182   GenCollectedHeap* heap = GenCollectedHeap::heap();
183 
184   // Widen the span over which references are discovered to the
185   // whole heap for the duration of this collection; the mutator's
186   // destructor restores the original span.
187   ReferenceProcessorSpanMutator span_mutator(ref_processor(),
188                                              heap->reserved_region());
189 
190   STWGCTimer* timer = GenMarkSweep::gc_timer();
191   timer->register_gc_start();
192 
193   SerialOldTracer* tracer = GenMarkSweep::gc_tracer();
194   tracer->report_gc_start(heap->gc_cause(), timer->gc_start());
195 
196   // Run the serial mark-sweep collection at a safepoint.
197   GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
198 
199   timer->register_gc_end();
200   tracer->report_gc_end(timer->gc_end(), timer->time_partitions());
201 }
201
202 HeapWord*
203 TenuredGeneration::expand_and_allocate(size_t word_size,
204 bool is_tlab,
205 bool parallel) {
206 assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
207 if (parallel) {
208 MutexLocker x(ParGCRareEvent_lock);
209 HeapWord* result = NULL;
210 size_t byte_size = word_size * HeapWordSize;
211 while (true) {
212 expand(byte_size, _min_heap_delta_bytes);
213 if (GCExpandToAllocateDelayMillis > 0) {
214 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
215 }
|
24
25 #include "precompiled.hpp"
26 #include "gc_implementation/shared/collectorCounters.hpp"
27 #include "gc_implementation/shared/gcTimer.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/blockOffsetTable.inline.hpp"
30 #include "memory/cardGeneration.inline.hpp"
31 #include "memory/generationSpec.hpp"
32 #include "memory/genMarkSweep.hpp"
33 #include "memory/genOopClosures.inline.hpp"
34 #include "memory/space.hpp"
35 #include "memory/tenuredGeneration.inline.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/java.hpp"
38 #include "utilities/macros.hpp"
39 #if INCLUDE_ALL_GCS
40 #include "gc_implementation/parNew/parOopClosures.hpp"
41 #endif
42
43 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
44 size_t initial_byte_size,
45 GenRemSet* remset) :
46 CardGeneration(rs, initial_byte_size, remset)
47 {
48 HeapWord* bottom = (HeapWord*) _virtual_space.low();
49 HeapWord* end = (HeapWord*) _virtual_space.high();
50 _the_space = new TenuredSpace(_bts, MemRegion(bottom, end));
51 _the_space->reset_saved_mark();
52 _shrink_factor = 0;
53 _capacity_at_prologue = 0;
54
55 _gc_stats = new GCStats();
56
57 // initialize performance counters
58
59 const char* gen_name = "old";
60 GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
61
62 // Generation Counters -- generation 1, 1 subspace
63 _gen_counters = new GenerationCounters(gen_name, 1, 1,
64 gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
65
66 _gc_counters = new CollectorCounters("MSC", 1);
117 _capacity_at_prologue, capacity());
118 }
119 }
120 return result;
121 }
122
123 void TenuredGeneration::compute_new_size() {
124   assert_locked_or_safepoint(Heap_lock);
125 
126   // Remember how much was in use before resizing so we can verify,
127   // below, that resizing never discards live data.
128   const size_t used_after_gc = used();
129 
130   // Delegate the actual expand/shrink policy to CardGeneration.
131   CardGeneration::compute_new_size();
132 
133   // Resizing must not change used() and must leave enough capacity
134   // for everything that is in use.
135   assert(used() == used_after_gc && used_after_gc <= capacity(),
136     err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
137     " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
138 }
136
137 void TenuredGeneration::update_gc_stats(Generation* current_generation,
138                                         bool full) {
139   // Promotion statistics are only gathered after a non-full
140   // collection of the young generation.
141   if (full ||
142       current_generation != GenCollectedHeap::heap()->young_gen()) {
143     return;
144   }
145 
146   // Size of this generation before the (old-gen) collection runs;
147   // growth since the prologue is the volume promoted into us.
148   size_t used_now = used();
149 
150   // If the younger gen collections were skipped, the number of
151   // promoted bytes will be 0 and sampling it would incorrectly
152   // lessen the average. It is, however, also possible that no
153   // promotion was needed, in which case a 0 sample is legitimate.
154   if (used_now >= _used_at_prologue) {
155     gc_stats()->avg_promoted()->sample(used_now - _used_at_prologue);
156   }
157 }
157
158 void TenuredGeneration::update_counters() {
159 if (UsePerfData) {
160 _space_counters->update_all();
161 _gen_counters->update_all();
162 }
176 return res;
177 }
178
179 void TenuredGeneration::collect(bool full,
180                                 bool clear_all_soft_refs,
181                                 size_t size,
182                                 bool is_tlab) {
183   GenCollectedHeap* heap = GenCollectedHeap::heap();
184 
185   // Widen the span over which references are discovered to the
186   // whole heap for the duration of this collection; the mutator's
187   // destructor restores the original span.
188   ReferenceProcessorSpanMutator span_mutator(ref_processor(),
189                                              heap->reserved_region());
190 
191   STWGCTimer* timer = GenMarkSweep::gc_timer();
192   timer->register_gc_start();
193 
194   SerialOldTracer* tracer = GenMarkSweep::gc_tracer();
195   tracer->report_gc_start(heap->gc_cause(), timer->gc_start());
196 
197   // Run the serial mark-sweep collection at a safepoint.
198   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
199 
200   timer->register_gc_end();
201   tracer->report_gc_end(timer->gc_end(), timer->time_partitions());
202 }
202
203 HeapWord*
204 TenuredGeneration::expand_and_allocate(size_t word_size,
205 bool is_tlab,
206 bool parallel) {
207 assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
208 if (parallel) {
209 MutexLocker x(ParGCRareEvent_lock);
210 HeapWord* result = NULL;
211 size_t byte_size = word_size * HeapWordSize;
212 while (true) {
213 expand(byte_size, _min_heap_delta_bytes);
214 if (GCExpandToAllocateDelayMillis > 0) {
215 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
216 }
|