216 assert(_end == _orig_end,
217 "we should have already filtered out humongous regions");
218
219 _in_collection_set = false;
220
221 set_young_index_in_cset(-1);
222 uninstall_surv_rate_group();
223 set_young_type(NotYoung);
224 reset_pre_dummy_top();
225
226 if (!par) {
227 // If this is parallel, this will be done later.
228 HeapRegionRemSet* hrrs = rem_set();
229 if (hrrs != NULL) hrrs->clear();
230 _claimed = InitialClaimValue;
231 }
232 zero_marked_bytes();
233
234 _offsets.resize(HeapRegion::GrainWords);
235 init_top_at_mark_start();
236 _strong_code_root_list->clear();
237 if (clear_space) clear(SpaceDecorator::Mangle);
238 }
239
240 void HeapRegion::par_clear() {
241 assert(used() == 0, "the region should have been already cleared");
242 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
243 HeapRegionRemSet* hrrs = rem_set();
244 hrrs->clear();
245 CardTableModRefBS* ct_bs =
246 (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
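  // Presumably clearing the cards over this region prevents stale dirty
  // cards from being refined after the region has been freed (a hedged
  // reading; the file does not state the reason).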
247 ct_bs->clear(MemRegion(bottom(), end()));
248 }
249
250 void HeapRegion::calc_gc_efficiency() {
251   // GC efficiency is the ratio of the space that would be
252   // reclaimed to the time we predict it would take to reclaim it.
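  // A worked example: if 8 MB of the region would be reclaimed and the
  // predicted collection time is 2.0 ms, the efficiency is
  // 8 MB / 2.0 ms = 4 MB per ms; regions with higher ratios are
  // preferred for the collection set.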
253 G1CollectedHeap* g1h = G1CollectedHeap::heap();
254 G1CollectorPolicy* g1p = g1h->g1_policy();
255
256 // Retrieve a prediction of the elapsed time for this region for
349 G1BlockOffsetSharedArray* sharedOffsetArray,
350 MemRegion mr) :
351 G1OffsetTableContigSpace(sharedOffsetArray, mr),
352 _hrs_index(hrs_index),
353 _humongous_type(NotHumongous), _humongous_start_region(NULL),
354 _in_collection_set(false),
355 _next_in_special_set(NULL), _orig_end(NULL),
356 _claimed(InitialClaimValue), _evacuation_failed(false),
357 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
358 _young_type(NotYoung), _next_young_region(NULL),
359 _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
360 #ifdef ASSERT
361 _containing_set(NULL),
362 #endif // ASSERT
363 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
364 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
365 _predicted_bytes_to_copy(0),
366 _strong_code_root_list(NULL)
367 {
368 _orig_end = mr.end();
369 _strong_code_root_list = new (ResourceObj::C_HEAP, mtGC)
370 GrowableArray<nmethod*>(10, 0, NULL, true);
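  // The four GrowableArray arguments: initial capacity 10, initial
  // length 0, NULL as the filler element, and true for C-heap
  // allocation, presumably so the list outlives any resource mark.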
371 // Note that initialize() will set the start of the unmarked area of the
372 // region.
373 hr_clear(false /*par*/, false /*clear_space*/);
374 set_top(bottom());
375 set_saved_mark();
376
377 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
378
379 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
380 }
381
382 CompactibleSpace* HeapRegion::next_compaction_space() const {
383   // We're not using an iterator because it would wrap around when
384   // it reaches the last region, which is not what we want here.
385 G1CollectedHeap* g1h = G1CollectedHeap::heap();
386 uint index = hrs_index() + 1;
387 while (index < g1h->n_regions()) {
388 HeapRegion* hr = g1h->region_at(index);
389 if (!hr->isHumongous()) {
390 return hr;
580
581 if (!g1h->is_obj_dead(obj)) {
582 if (next < end || !obj->is_objArray()) {
583       // This object either does not span the MemRegion
584       // boundary, or if it does, it's not an array.
585 // Apply closure to whole object.
586 obj->oop_iterate(cl);
587 } else {
588 // This obj is an array that spans the boundary.
589 // Stop at the boundary.
590 obj->oop_iterate(cl, mr);
591 }
592 }
593 cur = next;
594 }
595 return NULL;
596 }
597
598 // Code roots support
599
600 void HeapRegion::push_strong_code_root(nmethod* nm) {
601 assert(nm != NULL, "sanity");
602 // Search for the code blob from the RHS to avoid
603 // duplicate entries as much as possible
604 if (_strong_code_root_list->find_from_end(nm) < 0) {
605 // Code blob isn't already in the list
606 _strong_code_root_list->push(nm);
607 }
608 }
609
610 void HeapRegion::remove_strong_code_root(nmethod* nm) {
611 assert(nm != NULL, "sanity");
612 int idx = _strong_code_root_list->find(nm);
613 while (idx >= 0) {
614 _strong_code_root_list->remove_at(idx);
615 idx = _strong_code_root_list->find(nm);
616 }
617 }
618
619 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
620 for (int i = 0; i < _strong_code_root_list->length(); i += 1) {
638 // Reference still points into the source region.
639       // Since roots are immediately evacuated, this means that
640       // we must have self-forwarded the object.
641 assert(obj->is_forwarded(),
642 err_msg("code roots should be immediately evacuated. "
643 "Ref: "PTR_FORMAT", "
644 "Obj: "PTR_FORMAT", "
645 "Region: "HR_FORMAT,
646 p, (void*) obj, HR_FORMAT_PARAMS(_from)));
647 assert(obj->forwardee() == obj,
648 err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
649
650 // The object has been self forwarded.
651       // Note: if we're in an initial mark pause, there is
652       // no need to explicitly mark the object. It will be marked
653       // by the regular evacuation failure handling code.
654 _num_self_forwarded++;
655 } else {
656 // The reference points into a promotion or to-space region
657 HeapRegion* to = _g1h->heap_region_containing(obj);
658 to->push_strong_code_root(_nm);
659 }
660 }
661 }
662
663 public:
664 NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
665 _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
666
667 void do_oop(narrowOop* p) { do_oop_work(p); }
668 void do_oop(oop* p) { do_oop_work(p); }
669
670 uint retain() { return _num_self_forwarded > 0; }
671 };
672
673 void HeapRegion::migrate_strong_code_roots() {
674 assert(in_collection_set(), "only collection set regions");
675 assert(!isHumongous(), "not humongous regions");
676
677 // List of code blobs to retain for this region
678 GrowableArray<nmethod*> to_be_retained(10);
679 G1CollectedHeap* g1h = G1CollectedHeap::heap();
680
681 while (strong_code_root_list()->is_nonempty()) {
682     nmethod* nm = strong_code_root_list()->pop();
683 if (nm != NULL) {
684 NMethodMigrationOopClosure oop_cl(g1h, this, nm);
685 nm->oops_do(&oop_cl);
686 if (oop_cl.retain()) {
687 to_be_retained.push(nm);
688 }
689 }
690 }
691
692 // Now push any code roots we need to retain
693 // FIXME: assert that region got an evacuation failure if non-empty
694 while (to_be_retained.is_nonempty()) {
695 nmethod* nm = to_be_retained.pop();
696 assert(nm != NULL, "sanity");
697 push_strong_code_root(nm);
698 }
699 }
700
701 class VerifyStrongCodeRootOopClosure: public OopClosure {
702 const HeapRegion* _hr;
703 nmethod* _nm;
704 bool _failures;
705 bool _has_oops_in_region;
706
707 template <class T> void do_oop_work(T* p) {
708 T heap_oop = oopDesc::load_heap_oop(p);
709 if (!oopDesc::is_null(heap_oop)) {
710 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
711
712 // Note: not all the oops embedded in the nmethod are in the
713 // current region. We only look at those which are.
714 if (_hr->is_in(obj)) {
715       // Object is in the region. Check that it's below top.
716 if (_hr->top() <= (HeapWord*)obj) {
717 // Object is above top
1201 // this is called in parallel with other threads trying to
1202 // allocate into the region, the caller should call this while
1203 // holding a lock and when the lock is released the writes will be
1204 // flushed.
1205 }
1206 }
1207
1208 G1OffsetTableContigSpace::
1209 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
1210 MemRegion mr) :
1211 _offsets(sharedOffsetArray, mr),
1212 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
1213 _gc_time_stamp(0)
1214 {
1215 _offsets.set_space(this);
1216 // false ==> we'll do the clearing if there's clearing to be done.
1217 ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
1218 _offsets.zero_bottom_entry();
1219 _offsets.initialize_threshold();
1220 }
1221
1222 template class GrowableArray<nmethod*>;
|
216 assert(_end == _orig_end,
217 "we should have already filtered out humongous regions");
218
219 _in_collection_set = false;
220
221 set_young_index_in_cset(-1);
222 uninstall_surv_rate_group();
223 set_young_type(NotYoung);
224 reset_pre_dummy_top();
225
226 if (!par) {
227 // If this is parallel, this will be done later.
228 HeapRegionRemSet* hrrs = rem_set();
229 if (hrrs != NULL) hrrs->clear();
230 _claimed = InitialClaimValue;
231 }
232 zero_marked_bytes();
233
234 _offsets.resize(HeapRegion::GrainWords);
235 init_top_at_mark_start();
236
237 if (_strong_code_root_list != NULL) {
238 delete _strong_code_root_list;
239 }
240 _strong_code_root_list = new (ResourceObj::C_HEAP, mtGC)
241 GrowableArray<nmethod*>(10, 0, NULL, true);
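  // Deleting and reallocating, rather than calling clear(), presumably
  // returns the old backing array to the C heap instead of keeping its
  // capacity around for a region that may stay empty.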
242
243 if (clear_space) clear(SpaceDecorator::Mangle);
244 }
245
246 void HeapRegion::par_clear() {
247 assert(used() == 0, "the region should have been already cleared");
248 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
249 HeapRegionRemSet* hrrs = rem_set();
250 hrrs->clear();
251 CardTableModRefBS* ct_bs =
252 (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
253 ct_bs->clear(MemRegion(bottom(), end()));
254 }
255
256 void HeapRegion::calc_gc_efficiency() {
257   // GC efficiency is the ratio of the space that would be
258   // reclaimed to the time we predict it would take to reclaim it.
259 G1CollectedHeap* g1h = G1CollectedHeap::heap();
260 G1CollectorPolicy* g1p = g1h->g1_policy();
261
262 // Retrieve a prediction of the elapsed time for this region for
355 G1BlockOffsetSharedArray* sharedOffsetArray,
356 MemRegion mr) :
357 G1OffsetTableContigSpace(sharedOffsetArray, mr),
358 _hrs_index(hrs_index),
359 _humongous_type(NotHumongous), _humongous_start_region(NULL),
360 _in_collection_set(false),
361 _next_in_special_set(NULL), _orig_end(NULL),
362 _claimed(InitialClaimValue), _evacuation_failed(false),
363 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
364 _young_type(NotYoung), _next_young_region(NULL),
365 _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
366 #ifdef ASSERT
367 _containing_set(NULL),
368 #endif // ASSERT
369 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
370 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
371 _predicted_bytes_to_copy(0),
372 _strong_code_root_list(NULL)
373 {
374 _orig_end = mr.end();
375 // Note that initialize() will set the start of the unmarked area of the
376 // region.
377 hr_clear(false /*par*/, false /*clear_space*/);
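  // hr_clear() runs before _rem_set is allocated below; presumably this
  // is why the non-parallel path in hr_clear() checks rem_set() for
  // NULL before clearing it.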
378 set_top(bottom());
379 set_saved_mark();
380
381 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
382
383 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
384 }
385
386 CompactibleSpace* HeapRegion::next_compaction_space() const {
387   // We're not using an iterator because it would wrap around when
388   // it reaches the last region, which is not what we want here.
389 G1CollectedHeap* g1h = G1CollectedHeap::heap();
390 uint index = hrs_index() + 1;
391 while (index < g1h->n_regions()) {
392 HeapRegion* hr = g1h->region_at(index);
393 if (!hr->isHumongous()) {
394 return hr;
584
585 if (!g1h->is_obj_dead(obj)) {
586 if (next < end || !obj->is_objArray()) {
587       // This object either does not span the MemRegion
588       // boundary, or if it does, it's not an array.
589 // Apply closure to whole object.
590 obj->oop_iterate(cl);
591 } else {
592 // This obj is an array that spans the boundary.
593 // Stop at the boundary.
594 obj->oop_iterate(cl, mr);
595 }
596 }
597 cur = next;
598 }
599 return NULL;
600 }
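// For example, an objArray that starts inside mr but extends past its
// end is iterated only over the part within mr above; a hedged reading
// is that the remainder is covered when the cards beyond the boundary
// are processed separately.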
601
602 // Code roots support
603
604 void HeapRegion::add_strong_code_root(nmethod* nm) {
605 assert(nm != NULL, "sanity");
606 // Search for the code blob from the RHS to avoid
607 // duplicate entries as much as possible
608 if (_strong_code_root_list->find_from_end(nm) < 0) {
609 // Code blob isn't already in the list
610 _strong_code_root_list->push(nm);
611 }
612 }
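// A hypothetical caller sketch; g1h and obj_in_region are assumed
// names, not taken from this file:
//
//   HeapRegion* hr = g1h->heap_region_containing(obj_in_region);
//   hr->add_strong_code_root(nm);  // adding twice leaves one entry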
613
614 void HeapRegion::remove_strong_code_root(nmethod* nm) {
615 assert(nm != NULL, "sanity");
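  // add_strong_code_root() filters duplicates, so at most one entry is
  // expected; the loop below removes every occurrence just in case.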
616 int idx = _strong_code_root_list->find(nm);
617 while (idx >= 0) {
618 _strong_code_root_list->remove_at(idx);
619 idx = _strong_code_root_list->find(nm);
620 }
621 }
622
623 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
624 for (int i = 0; i < _strong_code_root_list->length(); i += 1) {
642 // Reference still points into the source region.
643       // Since roots are immediately evacuated, this means that
644       // we must have self-forwarded the object.
645 assert(obj->is_forwarded(),
646 err_msg("code roots should be immediately evacuated. "
647 "Ref: "PTR_FORMAT", "
648 "Obj: "PTR_FORMAT", "
649 "Region: "HR_FORMAT,
650 p, (void*) obj, HR_FORMAT_PARAMS(_from)));
651 assert(obj->forwardee() == obj,
652 err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
653
654 // The object has been self forwarded.
655       // Note: if we're in an initial mark pause, there is
656       // no need to explicitly mark the object. It will be marked
657       // by the regular evacuation failure handling code.
658 _num_self_forwarded++;
659 } else {
660 // The reference points into a promotion or to-space region
661 HeapRegion* to = _g1h->heap_region_containing(obj);
662 to->add_strong_code_root(_nm);
663 }
664 }
665 }
666
667 public:
668 NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
669 _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
670
671 void do_oop(narrowOop* p) { do_oop_work(p); }
672 void do_oop(oop* p) { do_oop_work(p); }
673
674 uint retain() { return _num_self_forwarded > 0; }
675 };
676
677 void HeapRegion::migrate_strong_code_roots() {
678 assert(in_collection_set(), "only collection set regions");
679 assert(!isHumongous(), "not humongous regions");
680
681 // List of code blobs to retain for this region
682 GrowableArray<nmethod*> to_be_retained(10);
683 G1CollectedHeap* g1h = G1CollectedHeap::heap();
684
685 while (strong_code_root_list()->is_nonempty()) {
686     nmethod* nm = strong_code_root_list()->pop();
687 if (nm != NULL) {
688 NMethodMigrationOopClosure oop_cl(g1h, this, nm);
689 nm->oops_do(&oop_cl);
690 if (oop_cl.retain()) {
691 to_be_retained.push(nm);
692 }
693 }
694 }
695
696 // Now push any code roots we need to retain
697 // FIXME: assert that region got an evacuation failure if non-empty
698 while (to_be_retained.is_nonempty()) {
699 nmethod* nm = to_be_retained.pop();
700 assert(nm != NULL, "sanity");
701 add_strong_code_root(nm);
702 }
703 }
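// A concrete (hypothetical) scenario: nmethod A references an object in
// this region that failed evacuation and was self-forwarded, so A stays
// on this region's list; nmethod B's references were all copied out, so
// the closure has already pushed B onto the destination regions' lists
// and B is dropped from this one.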
704
705 class VerifyStrongCodeRootOopClosure: public OopClosure {
706 const HeapRegion* _hr;
707 nmethod* _nm;
708 bool _failures;
709 bool _has_oops_in_region;
710
711 template <class T> void do_oop_work(T* p) {
712 T heap_oop = oopDesc::load_heap_oop(p);
713 if (!oopDesc::is_null(heap_oop)) {
714 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
715
716 // Note: not all the oops embedded in the nmethod are in the
717 // current region. We only look at those which are.
718 if (_hr->is_in(obj)) {
719       // Object is in the region. Check that it's below top.
720 if (_hr->top() <= (HeapWord*)obj) {
721 // Object is above top
1205 // this is called in parallel with other threads trying to
1206 // allocate into the region, the caller should call this while
1207 // holding a lock and when the lock is released the writes will be
1208 // flushed.
1209 }
1210 }
1211
1212 G1OffsetTableContigSpace::
1213 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
1214 MemRegion mr) :
1215 _offsets(sharedOffsetArray, mr),
1216 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
1217 _gc_time_stamp(0)
1218 {
1219 _offsets.set_space(this);
1220 // false ==> we'll do the clearing if there's clearing to be done.
1221 ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
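  // Our reading: zero the offset-table entry for the bottom card, then
  // reset the threshold at which the offset table next needs updating
  // as allocation proceeds.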
1222 _offsets.zero_bottom_entry();
1223 _offsets.initialize_threshold();
1224 }
|