15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
28 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
29 #include "gc_implementation/g1/heapRegion.inline.hpp"
30 #include "gc_implementation/g1/heapRegionRemSet.hpp"
31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
32 #include "memory/genOopClosures.inline.hpp"
33 #include "memory/iterator.hpp"
34 #include "oops/oop.inline.hpp"
35
// Region sizing parameters. Zero-initialized here; presumably set once
// during heap initialization (the setup code is not visible in this chunk)
// and constant thereafter.
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
41
// Dirty-card-to-oop closure for a single heap region. Delegates the
// actual card walking to ContiguousSpaceDCTOC; the filter kind (fk) is
// stored for use by the walk_mem_region machinery (not visible here).
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }
48
// Caches the region's [bottom, end) bounds so oops can later be tested
// against the region without re-reading the HeapRegion on every oop.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
52
53 template<class ClosureType>
54 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
215 assert(_end == _orig_end,
216 "we should have already filtered out humongous regions");
217
218 _in_collection_set = false;
219
220 set_young_index_in_cset(-1);
221 uninstall_surv_rate_group();
222 set_young_type(NotYoung);
223 reset_pre_dummy_top();
224
225 if (!par) {
226 // If this is parallel, this will be done later.
227 HeapRegionRemSet* hrrs = rem_set();
228 if (hrrs != NULL) hrrs->clear();
229 _claimed = InitialClaimValue;
230 }
231 zero_marked_bytes();
232
233 _offsets.resize(HeapRegion::GrainWords);
234 init_top_at_mark_start();
235 if (clear_space) clear(SpaceDecorator::Mangle);
236 }
237
// Parallel-phase stage of clearing a region: reset the remembered set
// and scrub the card table entries covering [bottom, end). The region's
// object contents must already have been cleared (used() == 0).
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  // Cast assumes G1's barrier set is a CardTableModRefBS — TODO confirm
  // against the heap's barrier set construction.
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}
247
248 void HeapRegion::calc_gc_efficiency() {
249 // GC efficiency is the ratio of how much space would be
250 // reclaimed over how long we predict it would take to reclaim it.
251 G1CollectedHeap* g1h = G1CollectedHeap::heap();
252 G1CollectorPolicy* g1p = g1h->g1_policy();
253
254 // Retrieve a prediction of the elapsed time for this region for
343 #endif // _MSC_VER
344
345
// Construct a HeapRegion covering the memory range mr. Most fields are
// put into their "free region" state in the initializer list; hr_clear()
// below re-establishes the remainder of the invariants.
HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  // Remember the original end; hr_clear() asserts _end == _orig_end to
  // catch regions that are still in a humongous (expanded) state.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  // The remembered set is allocated last, after the region is otherwise
  // fully initialized.
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
376
377 CompactibleSpace* HeapRegion::next_compaction_space() const {
378 // We're not using an iterator given that it will wrap around when
379 // it reaches the last region and this is not what we want here.
380 G1CollectedHeap* g1h = G1CollectedHeap::heap();
381 uint index = hrs_index() + 1;
382 while (index < g1h->n_regions()) {
383 HeapRegion* hr = g1h->region_at(index);
573 // Otherwise:
574 next = (cur + obj->size());
575
576 if (!g1h->is_obj_dead(obj)) {
577 if (next < end || !obj->is_objArray()) {
578 // This object either does not span the MemRegion
579 // boundary, or if it does it's not an array.
580 // Apply closure to whole object.
581 obj->oop_iterate(cl);
582 } else {
583 // This obj is an array that spans the boundary.
584 // Stop at the boundary.
585 obj->oop_iterate(cl, mr);
586 }
587 }
588 cur = next;
589 }
590 return NULL;
591 }
592
// Print a one-line summary of this region to the GC log stream.
void HeapRegion::print() const { print_on(gclog_or_tty); }
594 void HeapRegion::print_on(outputStream* st) const {
595 if (isHumongous()) {
596 if (startsHumongous())
597 st->print(" HS");
598 else
599 st->print(" HC");
600 } else {
601 st->print(" ");
602 }
603 if (in_collection_set())
604 st->print(" CS");
605 else
606 st->print(" ");
607 if (is_young())
608 st->print(is_survivor() ? " SU" : " Y ");
609 else
610 st->print(" ");
611 if (is_empty())
612 st->print(" F");
877
// Look up end - 1
879 HeapWord* addr_4 = the_end - 1;
880 HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
881 if (b_start_4 != p) {
882 gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
883 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
884 addr_4, b_start_4, p);
885 *failures = true;
886 return;
887 }
888 }
889
890 if (is_humongous && object_num > 1) {
891 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
892 "but has "SIZE_FORMAT", objects",
893 bottom(), end(), object_num);
894 *failures = true;
895 return;
896 }
897 }
898
// Convenience overload: verify against the previous marking information,
// discarding the failure flag.
void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
903
904 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
905 // away eventually.
906
// Clear the space and reset the block-offset table's bottom entry and
// update threshold so BOT lookups stay consistent with the empty space.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
912
// Keep the block-offset table in sync when the space's bottom moves.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
|
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
28 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
29 #include "gc_implementation/g1/heapRegion.inline.hpp"
30 #include "gc_implementation/g1/heapRegionRemSet.hpp"
31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
32 #include "memory/genOopClosures.inline.hpp"
33 #include "memory/iterator.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "utilities/growableArray.hpp"
36
// Region sizing parameters. Zero-initialized here; presumably set once
// during heap initialization (the setup code is not visible in this chunk)
// and constant thereafter.
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
42
// Dirty-card-to-oop closure for a single heap region. Delegates the
// actual card walking to ContiguousSpaceDCTOC; the filter kind (fk) is
// stored for use by the walk_mem_region machinery (not visible here).
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }
49
// Caches the region's [bottom, end) bounds so oops can later be tested
// against the region without re-reading the HeapRegion on every oop.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
53
54 template<class ClosureType>
55 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
216 assert(_end == _orig_end,
217 "we should have already filtered out humongous regions");
218
219 _in_collection_set = false;
220
221 set_young_index_in_cset(-1);
222 uninstall_surv_rate_group();
223 set_young_type(NotYoung);
224 reset_pre_dummy_top();
225
226 if (!par) {
227 // If this is parallel, this will be done later.
228 HeapRegionRemSet* hrrs = rem_set();
229 if (hrrs != NULL) hrrs->clear();
230 _claimed = InitialClaimValue;
231 }
232 zero_marked_bytes();
233
234 _offsets.resize(HeapRegion::GrainWords);
235 init_top_at_mark_start();
236
237 if (_strong_code_root_list != NULL) {
238 delete _strong_code_root_list;
239 }
240 _strong_code_root_list = new (ResourceObj::C_HEAP, mtGC)
241 GrowableArray<nmethod*>(10, 0, NULL, true);
242
243 if (clear_space) clear(SpaceDecorator::Mangle);
244 }
245
246 void HeapRegion::par_clear() {
247 assert(used() == 0, "the region should have been already cleared");
248 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
249 HeapRegionRemSet* hrrs = rem_set();
250 hrrs->clear();
251 CardTableModRefBS* ct_bs =
252 (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
253 ct_bs->clear(MemRegion(bottom(), end()));
254 }
255
256 void HeapRegion::calc_gc_efficiency() {
257 // GC efficiency is the ratio of how much space would be
258 // reclaimed over how long we predict it would take to reclaim it.
259 G1CollectedHeap* g1h = G1CollectedHeap::heap();
260 G1CollectorPolicy* g1p = g1h->g1_policy();
261
262 // Retrieve a prediction of the elapsed time for this region for
351 #endif // _MSC_VER
352
353
// Construct a HeapRegion covering the memory range mr. Most fields are
// put into their "free region" state in the initializer list; hr_clear()
// below re-establishes the remainder of the invariants, including the
// allocation of the strong code root list (NULL until then).
HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0), _strong_code_root_list(NULL)
{
  // Remember the original end; hr_clear() asserts _end == _orig_end to
  // catch regions that are still in a humongous (expanded) state.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  // The remembered set is allocated last, after the region is otherwise
  // fully initialized.
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
384
385 CompactibleSpace* HeapRegion::next_compaction_space() const {
386 // We're not using an iterator given that it will wrap around when
387 // it reaches the last region and this is not what we want here.
388 G1CollectedHeap* g1h = G1CollectedHeap::heap();
389 uint index = hrs_index() + 1;
390 while (index < g1h->n_regions()) {
391 HeapRegion* hr = g1h->region_at(index);
581 // Otherwise:
582 next = (cur + obj->size());
583
584 if (!g1h->is_obj_dead(obj)) {
585 if (next < end || !obj->is_objArray()) {
586 // This object either does not span the MemRegion
587 // boundary, or if it does it's not an array.
588 // Apply closure to whole object.
589 obj->oop_iterate(cl);
590 } else {
591 // This obj is an array that spans the boundary.
592 // Stop at the boundary.
593 obj->oop_iterate(cl, mr);
594 }
595 }
596 cur = next;
597 }
598 return NULL;
599 }
600
601 // Code roots support
602
603 void HeapRegion::add_strong_code_root(nmethod* nm) {
604 assert(nm != NULL, "sanity");
605 // Search for the code blob from the RHS to avoid
606 // duplicate entries as much as possible
607 if (_strong_code_root_list->find_from_end(nm) < 0) {
608 // Code blob isn't already in the list
609 _strong_code_root_list->push(nm);
610 }
611 }
612
613 void HeapRegion::remove_strong_code_root(nmethod* nm) {
614 assert(nm != NULL, "sanity");
615 int idx = _strong_code_root_list->find(nm);
616 while (idx >= 0) {
617 _strong_code_root_list->remove_at(idx);
618 idx = _strong_code_root_list->find(nm);
619 }
620 }
621
// Closure used while migrating strong code roots out of an evacuated
// region: for each oop embedded in the nmethod, either re-register the
// nmethod with the oop's destination region, or count the oop as
// self-forwarded (evacuation failure) so the caller can retain the
// nmethod on the source region.
class NMethodMigrationOopClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  HeapRegion* _from;          // source (collection set) region being drained
  nmethod* _nm;               // the nmethod whose oops are being examined

  uint _num_self_forwarded;   // count of oops left behind by evacuation failure

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_from->is_in(obj)) {
        // Reference still points into the source region.
        // Since roots are immediately evacuated this means that
        // we must have self forwarded the object
        assert(obj->is_forwarded(),
               err_msg("code roots should be immediately evacuated. "
                       "Ref: "PTR_FORMAT", "
                       "Obj: "PTR_FORMAT", "
                       "Region: "HR_FORMAT,
                       p, (void*) obj, HR_FORMAT_PARAMS(_from)));
        assert(obj->forwardee() == obj,
               err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));

        // The object has been self forwarded.
        // Note, if we're during an initial mark pause, there is
        // no need to explicitly mark object. It will be marked
        // during the regular evacuation failure handling code.
        _num_self_forwarded++;
      } else {
        // The reference points into a promotion or to-space region
        HeapRegion* to = _g1h->heap_region_containing(obj);
        to->add_strong_code_root(_nm);
      }
    }
  }

public:
  NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
    _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  // Nonzero when at least one oop was self forwarded; the caller should
  // then keep the nmethod on the source region's list.
  uint retain() { return _num_self_forwarded > 0; }
};
668
// After evacuation, drain this collection-set region's strong code root
// list and re-register each nmethod with the region(s) its oops now
// point to. An nmethod with at least one self-forwarded oop (evacuation
// failure) is retained on this region's own list.
void HeapRegion::migrate_strong_code_roots() {
  assert(in_collection_set(), "only collection set regions");
  assert(!isHumongous(), "not humongous regions");

  ResourceMark rm;

  // List of code blobs to retain for this region
  GrowableArray<nmethod*> to_be_retained(10);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Pop every nmethod off the list; the migration closure pushes it onto
  // the destination regions' lists as a side effect of oops_do().
  while (strong_code_root_list()->is_nonempty()) {
    nmethod *nm = strong_code_root_list()->pop();
    if (nm != NULL) {
      NMethodMigrationOopClosure oop_cl(g1h, this, nm);
      nm->oops_do(&oop_cl);
      if (oop_cl.retain()) {
        to_be_retained.push(nm);
      }
    }
  }

  // Now push any code roots we need to retain
  // FIXME: assert that region got an evacuation failure if non-empty
  while (to_be_retained.is_nonempty()) {
    nmethod* nm = to_be_retained.pop();
    assert(nm != NULL, "sanity");
    add_strong_code_root(nm);
  }
}
698
699 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
700 for (int i = 0; i < _strong_code_root_list->length(); i += 1) {
701 nmethod* nm = _strong_code_root_list->at(i);
702 blk->do_code_blob(nm);
703 }
704 }
705
// Approximate native memory footprint of the strong code root list:
// the GrowableArray header plus its allocated (max_length) element slots.
size_t HeapRegion::strong_code_root_mem_size() {
  return sizeof(GrowableArray<nmethod*>) +
         _strong_code_root_list->max_length() * sizeof(nmethod*);
}
710
711 class VerifyStrongCodeRootOopClosure: public OopClosure {
712 const HeapRegion* _hr;
713 nmethod* _nm;
714 bool _failures;
715 bool _has_oops_in_region;
716
717 template <class T> void do_oop_work(T* p) {
718 T heap_oop = oopDesc::load_heap_oop(p);
719 if (!oopDesc::is_null(heap_oop)) {
720 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
721
722 // Note: not all the oops embedded in the nmethod are in the
723 // current region. We only look at those which are.
724 if (_hr->is_in(obj)) {
725 // Object is in the region. Check that its less than top
726 if (_hr->top() <= (HeapWord*)obj) {
727 // Object is above top
728 gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
729 "["PTR_FORMAT", "PTR_FORMAT") is above "
730 "top "PTR_FORMAT,
731 obj, _hr->bottom(), _hr->end(), _hr->top());
732 _failures = true;
733 return;
734 }
735 // Nmethod has at least one oop in the current region
736 _has_oops_in_region = true;
737 }
738 }
739 }
740
741 public:
742 VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
743 _hr(hr), _failures(false), _has_oops_in_region(false) {}
744
745 void do_oop(narrowOop* p) { do_oop_work(p); }
746 void do_oop(oop* p) { do_oop_work(p); }
747
748 bool failures() { return _failures; }
749 bool has_oops_in_region() { return _has_oops_in_region; }
750 };
751
// CodeBlobClosure that verifies one entry of a region's strong code root
// list: the nmethod must be alive, and (via VerifyStrongCodeRootOopClosure)
// must have at least one oop pointing into the region.
class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};
789
// Verify this region's strong code root list (enabled by
// G1VerifyHeapRegionCodeRoots). Empty and humongous regions must have
// empty lists; otherwise each registered nmethod is checked via
// VerifyStrongCodeRootCodeBlobClosure. Sets *failures on any problem.
void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots in this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (!_strong_code_root_list->is_empty()) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), _strong_code_root_list->length());
      *failures = true;
    }
    return;
  }

  // An H-region should have an empty strong code root list
  if (isHumongous()) {
    if (!_strong_code_root_list->is_empty()) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), _strong_code_root_list->length());
      *failures = true;
    }
    return;
  }

  // Check each registered nmethod: must be alive and must have at least
  // one oop pointing into this region.
  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}
836
// Print a one-line summary of this region to the GC log stream.
void HeapRegion::print() const { print_on(gclog_or_tty); }
838 void HeapRegion::print_on(outputStream* st) const {
839 if (isHumongous()) {
840 if (startsHumongous())
841 st->print(" HS");
842 else
843 st->print(" HC");
844 } else {
845 st->print(" ");
846 }
847 if (in_collection_set())
848 st->print(" CS");
849 else
850 st->print(" ");
851 if (is_young())
852 st->print(is_survivor() ? " SU" : " Y ");
853 else
854 st->print(" ");
855 if (is_empty())
856 st->print(" F");
1121
// Look up end - 1
1123 HeapWord* addr_4 = the_end - 1;
1124 HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
1125 if (b_start_4 != p) {
1126 gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
1127 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
1128 addr_4, b_start_4, p);
1129 *failures = true;
1130 return;
1131 }
1132 }
1133
1134 if (is_humongous && object_num > 1) {
1135 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
1136 "but has "SIZE_FORMAT", objects",
1137 bottom(), end(), object_num);
1138 *failures = true;
1139 return;
1140 }
1141
1142 verify_strong_code_roots(vo, failures);
1143 }
1144
// Convenience overload: verify against the previous marking information,
// discarding the failure flag.
void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
1149
1150 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
1151 // away eventually.
1152
// Clear the space and reset the block-offset table's bottom entry and
// update threshold so BOT lookups stay consistent with the empty space.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
1158
// Keep the block-offset table in sync when the space's bottom moves.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
|