                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

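// Walk the objects intersecting mr, applying _rs_scan to each live one.
// The first and last objects may straddle the card range, so they are
// iterated with the MemRegion-bounded version of oop_iterate; objects that
// lie entirely within [bottom, top) use the cheaper unbounded version.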
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur), _hr)) {
    oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop, _hr)) {
        // The object lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

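// Pick the region size. When G1HeapRegionSize is not set on the command
// line, derive it from the average of the initial and maximum heap sizes,
// aiming for roughly HeapRegionBounds::target_number() regions but never
// going below the minimum region size.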
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  // ...

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == orig_end(),
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
  }
  zero_marked_bytes();

  // ...

  hrrs->clear();
  CardTableModRefBS* ct_bs =
    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

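// A humongous object spans one "starts humongous" region followed by zero
// or more "continues humongous" regions. Every region in the sequence
// records the start region, so the object's header can be located from any
// of its regions.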
void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(end() == orig_end(),
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(end() == orig_end(),
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  if (is_starts_humongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(orig_end());
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == orig_end(), "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _next_in_special_set(NULL),
    _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();

  assert(mr.end() == orig_end(),
         "Given region end address " PTR_FORMAT " should match exactly "
         "bottom plus one region size, i.e. " PTR_FORMAT,
         p2i(mr.end()), p2i(orig_end()));
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {

  // ...

          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _offsets.verify();
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
                           "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
// ...

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

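// Clearing the space resets top and the scan cursor back to bottom and
// resets the block offset table.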
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

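// Debug-only: fill the unused [top, end) range with the mangle pattern so
// that reads of uninitialized heap memory are easier to spot.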
#ifndef PRODUCT
void G1OffsetTableContigSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1OffsetTableContigSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

// ============================================================================

                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur))) {
    oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop)) {
        // The object lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur))) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  // ...

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
  }
  zero_marked_bytes();

  // ...

  hrrs->clear();
  CardTableModRefBS* ct_bs =
    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

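// Note that the region's end is not moved here: set_end asserts that a
// region's end is always bottom() + GrainWords. Only the block offset
// table is told where the humongous object's top, obj_top, is.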
void HeapRegion::set_starts_humongous(HeapWord* obj_top) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  _offsets.set_for_starts_humongous(obj_top);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _next_in_special_set(NULL),
    _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {

  // ...

          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _offsets.verify();
  }

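  // A humongous region must be covered by the humongous object that starts
  // in its start region; complain if the object does not span this
  // region's bottom.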
  if (is_region_humongous) {
    oop obj = oop(this->humongous_start_region()->bottom());
    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
      gclog_or_tty->print_cr("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
    }
  }

  if (!is_region_humongous && p != top()) {
    gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
                           "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
// ...

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

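// Keep the block offset table in sync whenever the space's bounds change.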
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  assert(new_end == _bottom + HeapRegion::GrainWords,
         "set_end should only ever be set to _bottom + HeapRegion::GrainWords");
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

919
920 #ifndef PRODUCT
921 void G1OffsetTableContigSpace::mangle_unused_area() {
922 mangle_unused_area_complete();
923 }
924
925 void G1OffsetTableContigSpace::mangle_unused_area_complete() {
926 SpaceMangler::mangle_region(MemRegion(top(), end()));
927 }
928 #endif
929
930 void G1OffsetTableContigSpace::print() const {
931 print_short();
932 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
933 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
934 p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
935 }