src/share/vm/gc/g1/heapRegion.cpp (old version)

  50                                  G1ParPushHeapRSClosure* cl,
  51                                  CardTableModRefBS::PrecisionStyle precision) :
  52   DirtyCardToOopClosure(hr, cl, precision, NULL),
  53   _hr(hr), _rs_scan(cl), _g1(g1) { }
  54 
  55 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
  56                                                    OopClosure* oc) :
  57   _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
  58 
  59 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
  60                                       HeapWord* bottom,
  61                                       HeapWord* top) {
  62   G1CollectedHeap* g1h = _g1;
  63   size_t oop_size;
  64   HeapWord* cur = bottom;
  65 
  66   // Start filtering what we add to the remembered set. If the object is
  67   // not considered dead, either because it is marked (in the mark bitmap)
  68   // or it was allocated after marking finished, then we add it. Otherwise
  69   // we can safely ignore the object.
  70   if (!g1h->is_obj_dead(oop(cur), _hr)) {
  71     oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  72   } else {
  73     oop_size = _hr->block_size(cur);
  74   }
  75 
  76   cur += oop_size;
  77 
  78   if (cur < top) {
  79     oop cur_oop = oop(cur);
  80     oop_size = _hr->block_size(cur);
  81     HeapWord* next_obj = cur + oop_size;
  82     while (next_obj < top) {
  83       // Keep filtering the remembered set.
  84       if (!g1h->is_obj_dead(cur_oop, _hr)) {
  85         // Bottom lies entirely below top, so we can call the
  86         // non-memRegion version of oop_iterate below.
  87         cur_oop->oop_iterate(_rs_scan);
  88       }
  89       cur = next_obj;
  90       cur_oop = oop(cur);
  91       oop_size = _hr->block_size(cur);
  92       next_obj = cur + oop_size;
  93     }
  94 
  95     // Last object. Need to do dead-obj filtering here too.
  96     if (!g1h->is_obj_dead(oop(cur), _hr)) {
  97       oop(cur)->oop_iterate(_rs_scan, mr);
  98     }
  99   }
 100 }
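
The filtering loop above is easier to see in isolation: scan each live object, but still step over dead ones by their block size so the cursor stays on object boundaries. A minimal C++ sketch (Obj, the dead flag and the scan callback are invented stand-ins for the oop/mark-bitmap machinery, and the MemRegion-clipped first and last iterations are folded into one loop):

#include <cstddef>
#include <cstdio>

// Stand-in heap object: size is measured in Obj-sized slots.
struct Obj { std::size_t size; bool dead; };

// Walk [bottom, top): scan live objects, but still advance over dead
// ones by their block size so the cursor stays on object boundaries.
void walk(Obj* bottom, Obj* top, void (*scan)(Obj*)) {
  for (Obj* cur = bottom; cur < top; cur += cur->size) {
    if (!cur->dead) {
      scan(cur);                 // corresponds to oop_iterate(_rs_scan)
    }                            // dead: skipped, but its size is consumed
  }
}

int main() {
  // Three objects at slots 0, 2 and 4; the middle one is dead.
  Obj heap[5] = {{2, false}, {0, false}, {2, true}, {0, false}, {1, false}};
  walk(heap, heap + 5, [](Obj* o) { std::printf("live, size %zu\n", o->size); });
}
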
 101 
 102 size_t HeapRegion::max_region_size() {
 103   return HeapRegionBounds::max_size();
 104 }
 105 
 106 size_t HeapRegion::min_region_size_in_words() {
 107   return HeapRegionBounds::min_size() >> LogHeapWordSize;
 108 }
 109 
 110 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
 111   size_t region_size = G1HeapRegionSize;
 112   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
 113     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
 114     region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
 115                        HeapRegionBounds::min_size());
 116   }


 145 
 146   guarantee(GrainWords == 0, "we should only set it once");
 147   GrainWords = GrainBytes >> LogHeapWordSize;
 148   guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
 149 
 150   guarantee(CardsPerRegion == 0, "we should only set it once");
 151   CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
 152 }
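
A worked example of the sizing ergonomics above, assuming the usual HeapRegionBounds values (1M minimum, 32M maximum, 2048 target regions) and folding in the power-of-two rounding done in the elided lines 117-144; region_size_for is a hypothetical helper, not part of the sources:

#include <cstddef>
#include <cstdio>

// Assumed bounds: 1M minimum, 32M maximum, 2048 target regions.
std::size_t region_size_for(std::size_t initial_heap, std::size_t max_heap) {
  const std::size_t kMin = 1u << 20, kMax = 32u << 20, kTarget = 2048;
  std::size_t avg = (initial_heap + max_heap) / 2;
  std::size_t sz = avg / kTarget;
  if (sz < kMin) sz = kMin;
  std::size_t pow2 = kMin;       // round down to a power of two, clamp to max
  while (pow2 * 2 <= sz && pow2 * 2 <= kMax) pow2 *= 2;
  return pow2;
}

int main() {
  // e.g. -Xms2g -Xmx8g: average 5G / 2048 regions ~ 2.5M, rounded to 2M
  std::printf("%zuM\n", region_size_for(2ull << 30, 8ull << 30) >> 20);
}
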
 153 
 154 void HeapRegion::reset_after_compaction() {
 155   G1OffsetTableContigSpace::reset_after_compaction();
 156   // After a compaction the mark bitmap is invalid, so we must
 157   // treat all objects as being inside the unmarked area.
 158   zero_marked_bytes();
 159   init_top_at_mark_start();
 160 }
 161 
 162 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
 163   assert(_humongous_start_region == NULL,
 164          "we should have already filtered out humongous regions");
 165   assert(_end == orig_end(),
 166          "we should have already filtered out humongous regions");
 167   assert(!in_collection_set(),
 168          "Should not clear heap region %u in the collection set", hrm_index());
 169 
 170   set_allocation_context(AllocationContext::system());
 171   set_young_index_in_cset(-1);
 172   uninstall_surv_rate_group();
 173   set_free();
 174   reset_pre_dummy_top();
 175 
 176   if (!par) {
 177     // If this is parallel, this will be done later.
 178     HeapRegionRemSet* hrrs = rem_set();
 179     if (locked) {
 180       hrrs->clear_locked();
 181     } else {
 182       hrrs->clear();
 183     }
 184   }
 185   zero_marked_bytes();
 186 


 196   hrrs->clear();
 197   CardTableModRefBS* ct_bs =
 198     barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
 199   ct_bs->clear(MemRegion(bottom(), end()));
 200 }
 201 
 202 void HeapRegion::calc_gc_efficiency() {
 203   // GC efficiency is the ratio of how much space would be
 204   // reclaimed over how long we predict it would take to reclaim it.
 205   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 206   G1CollectorPolicy* g1p = g1h->g1_policy();
 207 
 208   // Retrieve a prediction of the elapsed time for this region for
 209   // a mixed gc because the region will only be evacuated during a
 210   // mixed gc.
 211   double region_elapsed_time_ms =
 212     g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
 213   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 214 }
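
The metric above is bytes reclaimed per predicted millisecond of evacuation, so garbage-rich, cheap-to-collect regions sort first when mixed-gc candidates are ranked. A hypothetical two-region comparison:

#include <cstdio>

int main() {
  // Two hypothetical regions: reclaimable bytes / predicted evacuation ms.
  double a = (6.0 * 1024 * 1024) / 4.0;   // 6M garbage, 4ms  -> 1572864 B/ms
  double b = (8.0 * 1024 * 1024) / 16.0;  // 8M garbage, 16ms ->  524288 B/ms
  std::printf("a=%.0f B/ms, b=%.0f B/ms -> evacuate a first\n", a, b);
}
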
 215 
 216 void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
 217   assert(!is_humongous(), "sanity / pre-condition");
 218   assert(end() == orig_end(),
 219          "Should be normal before the humongous object allocation");
 220   assert(top() == bottom(), "should be empty");
 221   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 222 
 223   _type.set_starts_humongous();
 224   _humongous_start_region = this;
 225 
 226   set_end(new_end);
 227   _offsets.set_for_starts_humongous(new_top);
 228 }
 229 
 230 void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
 231   assert(!is_humongous(), "sanity / pre-condition");
 232   assert(end() == orig_end(),
 233          "Should be normal before the humongous object allocation");
 234   assert(top() == bottom(), "should be empty");
 235   assert(first_hr->is_starts_humongous(), "pre-condition");
 236 
 237   _type.set_continues_humongous();
 238   _humongous_start_region = first_hr;
 239 }
 240 
 241 void HeapRegion::clear_humongous() {
 242   assert(is_humongous(), "pre-condition");
 243 
 244   if (is_starts_humongous()) {
 245     assert(top() <= end(), "pre-condition");
 246     set_end(orig_end());
 247     if (top() > end()) {
 248       // at least one "continues humongous" region after it
 249       set_top(end());
 250     }
 251   } else {
 252     // continues humongous
 253     assert(end() == orig_end(), "sanity");
 254   }
 255 
 256   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
 257   _humongous_start_region = NULL;
 258 }
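
The three functions above maintain the humongous chain: the first region of an over-sized object is marked "starts humongous" and every further region it covers is marked "continues humongous" with a back-pointer to the first. A toy model of that bookkeeping (Region, Type and the index-based back-pointer are illustrative, not the HotSpot types):

#include <cstdio>

enum Type { Free = 0, StartsH, ContinuesH };

// humongous_start records the index of the first region of the object.
struct Region { Type type; int humongous_start; };

void allocate_humongous(Region* rs, int first, int num_regions) {
  rs[first] = { StartsH, first };                 // set_starts_humongous()
  for (int i = first + 1; i < first + num_regions; i++) {
    rs[i] = { ContinuesH, first };                // set_continues_humongous(first)
  }
}

int main() {
  Region rs[8] = {};                              // all Free
  allocate_humongous(rs, 2, 3);                   // object spans regions 2..4
  for (int i = 0; i < 8; i++) {
    std::printf("region %d: type=%d start=%d\n", i, rs[i].type, rs[i].humongous_start);
  }
}
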
 259 
 260 HeapRegion::HeapRegion(uint hrm_index,
 261                        G1BlockOffsetSharedArray* sharedOffsetArray,
 262                        MemRegion mr) :
 263     G1OffsetTableContigSpace(sharedOffsetArray, mr),
 264     _hrm_index(hrm_index),
 265     _allocation_context(AllocationContext::system()),
 266     _humongous_start_region(NULL),
 267     _next_in_special_set(NULL),
 268     _evacuation_failed(false),
 269     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
 270     _next_young_region(NULL),
 271     _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
 272 #ifdef ASSERT
 273     _containing_set(NULL),
 274 #endif // ASSERT
  275     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
 276     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
 277     _predicted_bytes_to_copy(0)
 278 {
 279   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
 280   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 281 
 282   initialize(mr);
 283 }
 284 
 285 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 286   assert(_rem_set->is_empty(), "Remembered set must be empty");
 287 
 288   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 289 
 290   hr_clear(false /*par*/, false /*clear_space*/);
 291   set_top(bottom());
 292   record_timestamp();
 293 
 294   assert(mr.end() == orig_end(),
 295          "Given region end address " PTR_FORMAT " should match exactly "
 296          "bottom plus one region size, i.e. " PTR_FORMAT,
 297          p2i(mr.end()), p2i(orig_end()));
 298 }
 299 
 300 CompactibleSpace* HeapRegion::next_compaction_space() const {
 301   return G1CollectedHeap::heap()->next_compaction_region(this);
 302 }
 303 
 304 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 305                                                     bool during_conc_mark) {
 306   // We always recreate the prev marking info and we'll explicitly
 307   // mark all objects we find to be self-forwarded on the prev
 308   // bitmap. So all objects need to be below PTAMS.
 309   _prev_marked_bytes = 0;
 310 
 311   if (during_initial_mark) {
 312     // During initial-mark, we'll also explicitly mark all objects
 313     // we find to be self-forwarded on the next bitmap. So all
 314     // objects need to be below NTAMS.
 315     _next_top_at_mark_start = top();
 316     _next_marked_bytes = 0;
 317   } else if (during_conc_mark) {


 703                                  p2i(p), p2i(_containing_obj),
 704                                  p2i(from->bottom()), p2i(from->end()));
 705           print_object(gclog_or_tty, _containing_obj);
 706           gclog_or_tty->print_cr("points to dead obj " PTR_FORMAT " in region "
 707                                  "[" PTR_FORMAT ", " PTR_FORMAT ")",
 708                                  p2i(obj), p2i(to->bottom()), p2i(to->end()));
 709           print_object(gclog_or_tty, obj);
 710         }
 711         gclog_or_tty->print_cr("----------");
 712         gclog_or_tty->flush();
 713         _failures = true;
 714         failed = true;
 715         _n_failures++;
 716       }
 717 
 718       if (!_g1h->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
 719         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
 720         HeapRegion* to   = _g1h->heap_region_containing(obj);
 721         if (from != NULL && to != NULL &&
 722             from != to &&
 723             !to->is_pinned()) {

 724           jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
 725           jbyte cv_field = *_bs->byte_for_const(p);
 726           const jbyte dirty = CardTableModRefBS::dirty_card_val();
 727 
 728           bool is_bad = !(from->is_young()
 729                           || to->rem_set()->contains_reference(p)
 730                           || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
 731                               (_containing_obj->is_objArray() ?
 732                                   cv_field == dirty
 733                                : cv_obj == dirty || cv_field == dirty));
 734           if (is_bad) {
 735             MutexLockerEx x(ParGCRareEvent_lock,
 736                             Mutex::_no_safepoint_check_flag);
 737 
 738             if (!_failures) {
 739               gclog_or_tty->cr();
 740               gclog_or_tty->print_cr("----------");
 741             }
 742             gclog_or_tty->print_cr("Missing rem set entry:");
 743             gclog_or_tty->print_cr("Field " PTR_FORMAT " "


 815           }
 816           if (G1MaxVerifyFailures >= 0 &&
 817               vl_cl.n_failures() >= G1MaxVerifyFailures) {
 818             return;
 819           }
 820         }
 821       } else {
  822         gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
 823         *failures = true;
 824         return;
 825       }
 826     }
 827     prev_p = p;
 828     p += obj_size;
 829   }
 830 
 831   if (!is_young() && !is_empty()) {
 832     _offsets.verify();
 833   }
 834 
 835   if (p != top()) {
 836     gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
 837                            "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
 838     *failures = true;
 839     return;
 840   }
 841 
 842   HeapWord* the_end = end();
 843   assert(p == top(), "it should still hold");
 844   // Do some extra BOT consistency checking for addresses in the
 845   // range [top, end). BOT look-ups in this range should yield
 846   // top. No point in doing that if top == end (there's nothing there).
 847   if (p < the_end) {
 848     // Look up top
 849     HeapWord* addr_1 = p;
 850     HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
 851     if (b_start_1 != p) {
  852       gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT
 853                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
 854                              p2i(addr_1), p2i(b_start_1), p2i(p));
 855       *failures = true;
 856       return;
 857     }
 858 
 859     // Look up top + 1
 860     HeapWord* addr_2 = p + 1;
 861     if (addr_2 < the_end) {
 862       HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
 863       if (b_start_2 != p) {




  50                                  G1ParPushHeapRSClosure* cl,
  51                                  CardTableModRefBS::PrecisionStyle precision) :
  52   DirtyCardToOopClosure(hr, cl, precision, NULL),
  53   _hr(hr), _rs_scan(cl), _g1(g1) { }
  54 
  55 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
  56                                                    OopClosure* oc) :
  57   _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
  58 
  59 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
  60                                       HeapWord* bottom,
  61                                       HeapWord* top) {
  62   G1CollectedHeap* g1h = _g1;
  63   size_t oop_size;
  64   HeapWord* cur = bottom;
  65 
  66   // Start filtering what we add to the remembered set. If the object is
  67   // not considered dead, either because it is marked (in the mark bitmap)
  68   // or it was allocated after marking finished, then we add it. Otherwise
  69   // we can safely ignore the object.
  70   if (!g1h->is_obj_dead(oop(cur))) {
  71     oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  72   } else {
  73     oop_size = _hr->block_size(cur);
  74   }
  75 
  76   cur += oop_size;
  77 
  78   if (cur < top) {
  79     oop cur_oop = oop(cur);
  80     oop_size = _hr->block_size(cur);
  81     HeapWord* next_obj = cur + oop_size;
  82     while (next_obj < top) {
  83       // Keep filtering the remembered set.
  84       if (!g1h->is_obj_dead(cur_oop)) {
  85         // Bottom lies entirely below top, so we can call the
  86         // non-memRegion version of oop_iterate below.
  87         cur_oop->oop_iterate(_rs_scan);
  88       }
  89       cur = next_obj;
  90       cur_oop = oop(cur);
  91       oop_size = _hr->block_size(cur);
  92       next_obj = cur + oop_size;
  93     }
  94 
  95     // Last object. Need to do dead-obj filtering here too.
  96     if (!g1h->is_obj_dead(oop(cur))) {
  97       oop(cur)->oop_iterate(_rs_scan, mr);
  98     }
  99   }
 100 }
 101 
 102 size_t HeapRegion::max_region_size() {
 103   return HeapRegionBounds::max_size();
 104 }
 105 
 106 size_t HeapRegion::min_region_size_in_words() {
 107   return HeapRegionBounds::min_size() >> LogHeapWordSize;
 108 }
 109 
 110 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
 111   size_t region_size = G1HeapRegionSize;
 112   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
 113     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
 114     region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
 115                        HeapRegionBounds::min_size());
 116   }


 145 
 146   guarantee(GrainWords == 0, "we should only set it once");
 147   GrainWords = GrainBytes >> LogHeapWordSize;
 148   guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
 149 
 150   guarantee(CardsPerRegion == 0, "we should only set it once");
 151   CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
 152 }
 153 
 154 void HeapRegion::reset_after_compaction() {
 155   G1OffsetTableContigSpace::reset_after_compaction();
 156   // After a compaction the mark bitmap is invalid, so we must
 157   // treat all objects as being inside the unmarked area.
 158   zero_marked_bytes();
 159   init_top_at_mark_start();
 160 }
 161 
 162 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
 163   assert(_humongous_start_region == NULL,
  164          "we should have already filtered out humongous regions");
 165   assert(!in_collection_set(),
 166          "Should not clear heap region %u in the collection set", hrm_index());
 167 
 168   set_allocation_context(AllocationContext::system());
 169   set_young_index_in_cset(-1);
 170   uninstall_surv_rate_group();
 171   set_free();
 172   reset_pre_dummy_top();
 173 
 174   if (!par) {
 175     // If this is parallel, this will be done later.
 176     HeapRegionRemSet* hrrs = rem_set();
 177     if (locked) {
 178       hrrs->clear_locked();
 179     } else {
 180       hrrs->clear();
 181     }
 182   }
 183   zero_marked_bytes();
 184 


 194   hrrs->clear();
 195   CardTableModRefBS* ct_bs =
 196     barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
 197   ct_bs->clear(MemRegion(bottom(), end()));
 198 }
 199 
 200 void HeapRegion::calc_gc_efficiency() {
 201   // GC efficiency is the ratio of how much space would be
 202   // reclaimed over how long we predict it would take to reclaim it.
 203   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 204   G1CollectorPolicy* g1p = g1h->g1_policy();
 205 
 206   // Retrieve a prediction of the elapsed time for this region for
 207   // a mixed gc because the region will only be evacuated during a
 208   // mixed gc.
 209   double region_elapsed_time_ms =
 210     g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
 211   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 212 }
 213 
 214 void HeapRegion::set_starts_humongous(HeapWord* obj_top) {
  215   assert(!is_humongous(), "sanity / pre-condition");
 216   assert(top() == bottom(), "should be empty");

 217 
 218   _type.set_starts_humongous();
 219   _humongous_start_region = this;
 220 
 221   _offsets.set_for_starts_humongous(obj_top);

 222 }
 223 
 224 void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  225   assert(!is_humongous(), "sanity / pre-condition");
 226   assert(top() == bottom(), "should be empty");
 227   assert(first_hr->is_starts_humongous(), "pre-condition");
 228 
 229   _type.set_continues_humongous();
 230   _humongous_start_region = first_hr;
 231 }
 232 
 233 void HeapRegion::clear_humongous() {
 234   assert(is_humongous(), "pre-condition");
  235 
 236   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
 237   _humongous_start_region = NULL;
 238 }
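
Against the old version further up, the updated clear_humongous() no longer repairs end() and top(): a region's extent now stays fixed at GrainBytes and only the object top is recorded via set_for_starts_humongous(obj_top), so clearing reduces to resetting the type and the back-pointer. A minimal sketch of that invariant (illustrative types only, not the HotSpot ones):

#include <cassert>

struct Region {
  bool humongous = false;
  Region* humongous_start = nullptr;
  // Extent is fixed (think GrainBytes); it is never grown or shrunk.
  void clear_humongous() {
    assert(humongous);
    humongous = false;          // _type back to a non-humongous state
    humongous_start = nullptr;  // drop the back-pointer; no set_end()/set_top()
  }
};

int main() {
  Region r;
  r.humongous = true;
  r.humongous_start = &r;
  r.clear_humongous();
  assert(r.humongous_start == nullptr);
}
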
 239 
 240 HeapRegion::HeapRegion(uint hrm_index,
 241                        G1BlockOffsetSharedArray* sharedOffsetArray,
 242                        MemRegion mr) :
 243     G1OffsetTableContigSpace(sharedOffsetArray, mr),
 244     _hrm_index(hrm_index),
 245     _allocation_context(AllocationContext::system()),
 246     _humongous_start_region(NULL),
 247     _next_in_special_set(NULL),
 248     _evacuation_failed(false),
 249     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
 250     _next_young_region(NULL),
 251     _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
 252 #ifdef ASSERT
 253     _containing_set(NULL),
 254 #endif // ASSERT
  255     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
 256     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
 257     _predicted_bytes_to_copy(0)
 258 {
 259   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
 260   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 261 
 262   initialize(mr);
 263 }
 264 
 265 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 266   assert(_rem_set->is_empty(), "Remembered set must be empty");
 267 
 268   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 269 
 270   hr_clear(false /*par*/, false /*clear_space*/);
 271   set_top(bottom());
  272   record_timestamp();
 273 }
 274 
 275 CompactibleSpace* HeapRegion::next_compaction_space() const {
 276   return G1CollectedHeap::heap()->next_compaction_region(this);
 277 }
 278 
 279 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 280                                                     bool during_conc_mark) {
 281   // We always recreate the prev marking info and we'll explicitly
 282   // mark all objects we find to be self-forwarded on the prev
 283   // bitmap. So all objects need to be below PTAMS.
 284   _prev_marked_bytes = 0;
 285 
 286   if (during_initial_mark) {
 287     // During initial-mark, we'll also explicitly mark all objects
 288     // we find to be self-forwarded on the next bitmap. So all
 289     // objects need to be below NTAMS.
 290     _next_top_at_mark_start = top();
 291     _next_marked_bytes = 0;
 292   } else if (during_conc_mark) {


 678                                  p2i(p), p2i(_containing_obj),
 679                                  p2i(from->bottom()), p2i(from->end()));
 680           print_object(gclog_or_tty, _containing_obj);
 681           gclog_or_tty->print_cr("points to dead obj " PTR_FORMAT " in region "
 682                                  "[" PTR_FORMAT ", " PTR_FORMAT ")",
 683                                  p2i(obj), p2i(to->bottom()), p2i(to->end()));
 684           print_object(gclog_or_tty, obj);
 685         }
 686         gclog_or_tty->print_cr("----------");
 687         gclog_or_tty->flush();
 688         _failures = true;
 689         failed = true;
 690         _n_failures++;
 691       }
 692 
 693       if (!_g1h->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
 694         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
 695         HeapRegion* to   = _g1h->heap_region_containing(obj);
 696         if (from != NULL && to != NULL &&
 697             from != to &&
 698             !to->is_pinned() &&
 699             !from->is_continues_humongous()) {
 700           jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
 701           jbyte cv_field = *_bs->byte_for_const(p);
 702           const jbyte dirty = CardTableModRefBS::dirty_card_val();
 703 
 704           bool is_bad = !(from->is_young()
 705                           || to->rem_set()->contains_reference(p)
 706                           || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
 707                               (_containing_obj->is_objArray() ?
 708                                   cv_field == dirty
 709                                : cv_obj == dirty || cv_field == dirty));
 710           if (is_bad) {
 711             MutexLockerEx x(ParGCRareEvent_lock,
 712                             Mutex::_no_safepoint_check_flag);
 713 
 714             if (!_failures) {
 715               gclog_or_tty->cr();
 716               gclog_or_tty->print_cr("----------");
 717             }
 718             gclog_or_tty->print_cr("Missing rem set entry:");
 719             gclog_or_tty->print_cr("Field " PTR_FORMAT " "


 791           }
 792           if (G1MaxVerifyFailures >= 0 &&
 793               vl_cl.n_failures() >= G1MaxVerifyFailures) {
 794             return;
 795           }
 796         }
 797       } else {
  798         gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
 799         *failures = true;
 800         return;
 801       }
 802     }
 803     prev_p = p;
 804     p += obj_size;
 805   }
 806 
 807   if (!is_young() && !is_empty()) {
 808     _offsets.verify();
 809   }
 810 
 811   if (!is_region_humongous && p != top()) {
 812     gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
 813                            "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
 814     *failures = true;
 815     return;
 816   }
 817 
 818   HeapWord* the_end = end();

 819   // Do some extra BOT consistency checking for addresses in the
 820   // range [top, end). BOT look-ups in this range should yield
 821   // top. No point in doing that if top == end (there's nothing there).
 822   if (p < the_end) {
 823     // Look up top
 824     HeapWord* addr_1 = p;
 825     HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
 826     if (b_start_1 != p) {
  827       gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT
 828                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
 829                              p2i(addr_1), p2i(b_start_1), p2i(p));
 830       *failures = true;
 831       return;
 832     }
 833 
 834     // Look up top + 1
 835     HeapWord* addr_2 = p + 1;
 836     if (addr_2 < the_end) {
 837       HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
  838       if (b_start_2 != p) {