 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionTracer.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;

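// G1's specialization of DirtyCardToOopClosure: used when scanning a
// dirty card within this region, applying the remembered-set scan
// closure (_rs_scan) to the fields of objects covered by the card.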
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

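// Wraps an OopClosure and applies it only to references that point
// outside [_r_bottom, _r_end); references back into the region itself
// are filtered out.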
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

// ... (intervening definitions are elided from this excerpt, which
// resumes below in the middle of a function body) ...
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

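// Worked example with hypothetical numbers: a region with 4 MB
// (4194304 bytes) of reclaimable space and a predicted evacuation time
// of 2 ms gets an efficiency of 4194304 / 2 = 2097152 bytes/ms; regions
// with higher efficiency are more attractive mixed-collection candidates.

// Region type mutators: each reports the type transition to the region
// tracer (see report_region_type_change() below) before updating _type.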
void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}

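// A humongous object occupies a "starts humongous" region followed by
// zero or more "continues humongous" regions; only the first region
// tracks the object's start, so new_top and new_end may lie beyond this
// region's original end.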
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

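// Undoes the changes made by the two setters above: a starts-humongous
// region gets its original end restored (capping top if the object
// extended into subsequent regions), while a continues-humongous region
// never had its end moved.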
void HeapRegion::clear_humongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
// ... (the remainder of clear_humongous() and the start of the
// HeapRegion constructor are elided; the excerpt resumes in the
// constructor's initializer list, inside an #ifdef ASSERT block) ...
#endif // ASSERT
  _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
  _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
  _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

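// (Re)initializes the region over the given MemRegion: the underlying
// contiguous space is set up, the original end is recorded so humongous
// expansion can later be undone, the region state is cleared, and top
// is reset to bottom.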
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  _orig_end = mr.end();
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}

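// Forwards a region type transition to HeapRegionTracer, which emits
// the corresponding event with the region's index, old and new types,
// start address, and bytes used.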
void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used());
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

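// Called when an evacuation failure leaves self-forwarded objects in
// this region: the previous marking information is reset so that those
// objects can be explicitly re-marked on the prev bitmap (and, during
// initial-mark, on the next bitmap as well).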
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {