/*
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

int G1HeapVerifier::_enabled_verification_types = G1HeapVerifier::G1VerifyAll;

class VerifyRootsClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  VerifyRootsClosure(VerifyOption vo) :
    _g1h(G1CollectedHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

  template <class T> void do_oop_work(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _vo)) {
        Log(gc, verify) log;
        log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT " vo %d", p2i(p), p2i(obj), _vo);
        ResourceMark rm;
        LogStream ls(log.error());
        obj->print_on(&ls);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
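
// Which marking view a caller should select depends on where in the
// collection cycle verification runs. A rough guide (a sketch only; the
// authoritative choices are made at the G1 pause and full GC call sites):
//
//   VerifyOption_G1UsePrevMarking -- the common case: the "prev" bitmap
//                                    is complete and stable.
//   VerifyOption_G1UseNextMarking -- only once the "next" bitmap has been
//                                    finalized (e.g. at the Remark pause).
//   VerifyOption_G1UseFullMarking -- during a full GC, where the "next"
//                                    bitmap is consulted without TAMS.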

class G1VerifyCodeRootOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _root_cl;
  nmethod* _nm;
  VerifyOption _vo;
  bool _failures;

  template <class T> void do_oop_work(T* p) {
    // First verify that this root is live
    _root_cl->do_oop(p);

    if (!G1VerifyHeapRegionCodeRoots) {
      // We're not verifying the code roots attached to heap regions.
      return;
    }

    // Don't check the code roots during marking verification in a full GC
    if (_vo == VerifyOption_G1UseFullMarking) {
      return;
    }

    // Now verify that the current nmethod (which contains p) is
    // in the code root list of the heap region containing the
    // object referenced by p.

    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);

      // Now fetch the region containing the object
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      HeapRegionRemSet* hrrs = hr->rem_set();
      // Verify that the strong code root list for this region
      // contains the nmethod
      if (!hrrs->strong_code_roots_list_contains(_nm)) {
        log_error(gc, verify)("Code root location " PTR_FORMAT " "
                              "from nmethod " PTR_FORMAT " not in strong "
                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
        _failures = true;
      }
    }
  }

public:
  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
    _g1h(g1h), _root_cl(root_cl), _nm(NULL), _vo(vo), _failures(false) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }

  void set_nmethod(nmethod* nm) { _nm = nm; }
  bool failures() { return _failures; }
};

class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  G1VerifyCodeRootOopClosure* _oop_cl;

public:
  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
    _oop_cl(oop_cl) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      _oop_cl->set_nmethod(nm);
      nm->oops_do(_oop_cl);
    }
  }
};
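
// How the two closures above compose (a sketch of the wiring done in
// G1HeapVerifier::verify() further down; CodeCache::blobs_do() stands in
// here for whatever root processor actually walks the code cache):
//
//   VerifyRootsClosure roots_cl(vo);
//   G1VerifyCodeRootOopClosure oop_cl(g1h, &roots_cl, vo);
//   G1VerifyCodeRootBlobClosure blob_cl(&oop_cl);
//   CodeCache::blobs_do(&blob_cl); // for each nmethod: set_nmethod(nm), then
//                                  // check every oop it embeds against the
//                                  // owning region's strong code root list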

class YoungRefCounterClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  int              _count;
 public:
  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }

  int count() { return _count; }
  void reset_count() { _count = 0; }
};

class VerifyCLDClosure: public CLDClosure {
  YoungRefCounterClosure _young_ref_counter_closure;
  OopClosure *_oop_closure;
 public:
  VerifyCLDClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  void do_cld(ClassLoaderData* cld) {
    cld->oops_do(_oop_closure, ClassLoaderData::_claim_none);

    _young_ref_counter_closure.reset_count();
    cld->oops_do(&_young_ref_counter_closure, ClassLoaderData::_claim_none);
    if (_young_ref_counter_closure.count() > 0) {
      guarantee(cld->has_modified_oops(), "CLD " PTR_FORMAT " has %d young refs but is not dirty.", p2i(cld), _young_ref_counter_closure.count());
    }
  }
};
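
// The invariant checked above, written out (illustrative notation only):
//
//   (exists oop o reachable from cld : is_in_young(o))
//       ==> cld->has_modified_oops()
//
// As the guarantee's message suggests, a CLD holding a young reference
// must be flagged dirty; otherwise root scanning that relies on the flag
// could skip the CLD and miss the young object.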

class VerifyLivenessOopClosure: public BasicOopIterateClosure {
  G1CollectedHeap* _g1h;
  VerifyOption _vo;
  oop _o;
public:
  VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo, oop o):
    _g1h(g1h), _vo(vo), _o(o)
  { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
              "vo %d Dead object " PTR_FORMAT " (%s) referenced by a not dead object " PTR_FORMAT " (%s) _g1h->is_obj_dead_cond(obj, _vo) %d marked %d since prev mark %d since next mark %d",
              _vo,
              p2i(obj), _g1h->heap_region_containing(obj)->get_short_type_str(),
              p2i(_o), _g1h->heap_region_containing(_o)->get_short_type_str(),
              _g1h->is_obj_dead_cond(obj, _vo),
              _g1h->concurrent_mark()->prev_mark_bitmap()->is_marked(_o),
              _g1h->heap_region_containing(_o)->obj_allocated_since_prev_marking(_o),
              _g1h->heap_region_containing(_o)->obj_allocated_since_next_marking(_o));
  }
};

class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;
  HeapRegion *_hr;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
  VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
    : _live_bytes(0), _hr(hr), _vo(vo) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h, _vo, o);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _vo)) {
      // If the object is alive according to the full gc mark,
      // then verify that the marking information agrees.
      // Note we can't verify the contra-positive of the
      // above: if the object is dead (according to the mark
      // word), it may not be marked, or may have been marked
      // but has since become dead, or may have been allocated
      // since the last marking.
      if (_vo == VerifyOption_G1UseFullMarking) {
        guarantee(!_g1h->is_obj_dead(o), "Full GC marking and concurrent mark mismatch");
      }

      o->oop_iterate(&isLive);
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();    // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};
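
// Accounting sketch: live_bytes() only counts objects allocated before the
// previous marking (below prev TAMS); anything newer is implicitly live and
// excluded. For example, two such live objects of 24 and 40 words would
// contribute
//
//   live_bytes() == (24 + 40) * HeapWordSize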

class VerifyArchiveOopClosure: public BasicOopIterateClosure {
  HeapRegion* _hr;
public:
  VerifyArchiveOopClosure(HeapRegion *hr)
    : _hr(hr) { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);

    if (_hr->is_open_archive()) {
      guarantee(obj == NULL || G1ArchiveAllocator::is_archived_object(obj),
                "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                p2i(p), p2i(obj));
    } else {
      assert(_hr->is_closed_archive(), "should be closed archive region");
      guarantee(obj == NULL || G1ArchiveAllocator::is_closed_archive_object(obj),
                "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                p2i(p), p2i(obj));
    }
  }
};

class VerifyObjectInArchiveRegionClosure: public ObjectClosure {
  HeapRegion* _hr;
public:
  VerifyObjectInArchiveRegionClosure(HeapRegion *hr, bool verbose)
    : _hr(hr) { }
  // Verify that all object pointers are to archive regions.
  void do_object(oop o) {
    VerifyArchiveOopClosure checkOop(_hr);
    assert(o != NULL, "Should not be here for NULL oops");
    o->oop_iterate(&checkOop);
  }
};

// Should only be used at CDS dump time
class VerifyReadyForArchivingRegionClosure : public HeapRegionClosure {
  bool _seen_free;
  bool _has_holes;
  bool _has_unexpected_holes;
  bool _has_humongous;
public:
  bool has_holes() { return _has_holes; }
  bool has_unexpected_holes() { return _has_unexpected_holes; }
  bool has_humongous() { return _has_humongous; }

  VerifyReadyForArchivingRegionClosure() : HeapRegionClosure() {
    _seen_free = false;
    _has_holes = false;
    _has_unexpected_holes = false;
    _has_humongous = false;
  }
  virtual bool do_heap_region(HeapRegion* hr) {
    const char* hole = "";

    if (hr->is_free()) {
      _seen_free = true;
    } else {
      if (_seen_free) {
        _has_holes = true;
        if (hr->is_humongous()) {
          hole = " hole";
        } else {
          _has_unexpected_holes = true;
          hole = " hole **** unexpected ****";
        }
      }
    }
    if (hr->is_humongous()) {
      _has_humongous = true;
    }
    log_info(gc, region, cds)("HeapRegion " INTPTR_FORMAT " %s%s", p2i(hr->bottom()), hr->get_type_str(), hole);
    return false;
  }
};
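
// Worked example of the hole detection above, on a hypothetical region
// layout (bottom of the heap to the top):
//
//   old, old, free, humongous, free, old
//
// The humongous region follows a free region, so it is a hole, but a
// tolerated one (humongous regions cannot be moved). The final old region
// also sits above free space; since it is movable, it is an unexpected hole.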

// We want all used regions to be moved to the bottom-end of the heap, so we have
// a contiguous range of free regions at the top end of the heap. This way, we can
// avoid fragmentation while allocating the archive regions.
//
// Before calling this, a full GC should have been executed with a single worker thread,
// so that no old regions end up in the middle of the heap.
void G1HeapVerifier::verify_ready_for_archiving() {
  VerifyReadyForArchivingRegionClosure cl;
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  if (cl.has_holes()) {
    log_warning(gc, verify)("All free regions should be at the top end of the heap, but"
                            " we found holes. This is probably caused by (unmovable) humongous"
                            " allocations or active GCLocker, and may lead to fragmentation while"
                            " writing archive heap memory regions.");
  }
  if (cl.has_humongous()) {
    log_warning(gc, verify)("(Unmovable) humongous regions have been found and"
                            " may lead to fragmentation while"
                            " writing archive heap memory regions.");
  }
}
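
// Usage sketch (an assumed CDS dump-time sequence, not code in this file):
// the dump-time driver first runs a full collection with a single worker so
// compaction leaves no movable regions above free space, then calls:
//
//   // e.g. after a full-GC VM operation with one worker thread
//   G1HeapVerifier::verify_ready_for_archiving();
//
// Any warning logged here signals likely fragmentation in the archive heap
// ranges about to be written.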

class VerifyArchivePointerRegionClosure: public HeapRegionClosure {
  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_archive()) {
      VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
      r->object_iterate(&verify_oop_pointers);
    }
    return false;
  }
};

void G1HeapVerifier::verify_archive_regions() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  VerifyArchivePointerRegionClosure cl;
  g1h->heap_region_iterate(&cl);
}

class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool             _par;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  VerifyRegionClosure(bool par, VerifyOption vo)
    : _par(par),
      _vo(vo),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool do_heap_region(HeapRegion* r) {
    guarantee(!r->has_index_in_opt_cset(), "Region %u still has opt collection set index %u", r->hrm_index(), r->index_in_opt_cset());
    guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
    // Humongous and old regions might be of any state, so can't check here.
    guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
    // Verify that the continues humongous regions' remembered set state
    // matches the one from the starts humongous region.
    if (r->is_continues_humongous()) {
      if (r->rem_set()->get_state_str() != r->humongous_start_region()->rem_set()->get_state_str()) {
        log_error(gc, verify)("Remset states differ: Region %u (%s) remset %s with starts region %u (%s) remset %s",
                              r->hrm_index(),
                              r->get_short_type_str(),
                              r->rem_set()->get_state_str(),
                              r->humongous_start_region()->hrm_index(),
                              r->humongous_start_region()->get_short_type_str(),
                              r->humongous_start_region()->rem_set()->get_state_str());
        _failures = true;
      }
    }
    // For archive regions, verify there are no heap pointers to
    // non-pinned regions. For all others, verify liveness info.
    if (r->is_closed_archive()) {
      VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
      r->object_iterate(&verify_oop_pointers);
      return true;
    } else if (r->is_open_archive()) {
      VerifyObjsInRegionClosure verify_open_archive_oop(r, _vo);
      r->object_iterate(&verify_open_archive_oop);
      return true;
    } else if (!r->is_continues_humongous()) {
      bool failures = false;
      r->verify(_vo, &failures);
      if (failures) {
        _failures = true;
      } else if (!r->is_starts_humongous()) {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (_vo != VerifyOption_G1UseNextMarking) {
          if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
            log_error(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
            _failures = true;
          }
        } else {
          // When vo == UseNextMarking we cannot currently do a sanity
          // check on the live bytes as the calculation has not been
          // finalized yet.
        }
      }
    }
    return false; // Continue the iteration; failures are accumulated in _failures.
  }
};

// This is the task used for parallel verification of the heap regions

class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap*  _g1h;
  VerifyOption      _vo;
  bool              _failures;
  HeapRegionClaimer _hrclaimer;

public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
      AbstractGangTask("Parallel verify task"),
      _g1h(g1h),
      _vo(vo),
      _failures(false),
      _hrclaimer(g1h->workers()->active_workers()) {}

  bool failures() {
    return _failures;
  }

  void work(uint worker_id) {
    VerifyRegionClosure blk(true, _vo);
    _g1h->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
    if (blk.failures()) {
      _failures = true;
    }
  }
};

void G1HeapVerifier::enable_verification_type(G1VerifyType type) {
  // First enable will clear _enabled_verification_types.
  if (_enabled_verification_types == G1VerifyAll) {
    _enabled_verification_types = type;
  } else {
    _enabled_verification_types |= type;
  }
}

bool G1HeapVerifier::should_verify(G1VerifyType type) {
  return (_enabled_verification_types & type) == type;
}
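
// Example of the enable/should_verify interplay (type names assumed from
// the G1VerifyType enum in g1HeapVerifier.hpp; the values are bit flags):
//
//   // Default: _enabled_verification_types == G1VerifyAll, so every
//   // should_verify(...) query answers true.
//   enable_verification_type(G1VerifyYoungNormal); // first call: replaces G1VerifyAll
//   enable_verification_type(G1VerifyFull);        // later calls: OR into the mask
//   should_verify(G1VerifyYoungNormal);            // true
//   should_verify(G1VerifyMixed);                  // false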

void G1HeapVerifier::verify(VerifyOption vo) {
  if (!SafepointSynchronize::is_at_safepoint()) {
    // Match the message: actually skip the verification when we are not at
    // a safepoint, instead of only logging and carrying on.
    log_info(gc, verify)("Skipping verification. Not at safepoint.");
    return;
  }

  assert(Thread::current()->is_VM_thread(),
         "Expected to be executed serially by the VM thread at this point");

  log_debug(gc, verify)("Roots");
  VerifyRootsClosure rootsCl(vo);
  VerifyCLDClosure cldCl(_g1h, &rootsCl);

  // We apply the relevant closures to all the oops in the
  // system dictionary, class loader data graph, the string table
  // and the nmethods in the code cache.
  G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo);
  G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

  {
    G1RootProcessor root_processor(_g1h, 1);
    root_processor.process_all_roots(&rootsCl, &cldCl, &blobsCl);
  }

  bool failures = rootsCl.failures() || codeRootsCl.failures();

  if (!_g1h->policy()->collector_state()->in_full_gc()) {
    // If we're verifying during a full GC then the region sets
    // will have been torn down at the start of the GC. Therefore
    // verifying the region sets will fail. So we only verify
    // the region sets when not in a full GC.
    log_debug(gc, verify)("HeapRegionSets");
    verify_region_sets();
  }

  log_debug(gc, verify)("HeapRegions");
  if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
    G1ParVerifyTask task(_g1h, vo);
    _g1h->workers()->run_task(&task);
    if (task.failures()) {
      failures = true;
    }
  } else {
    VerifyRegionClosure blk(false, vo);
    _g1h->heap_region_iterate(&blk);
    if (blk.failures()) {
      failures = true;
    }
  }

  if (G1StringDedup::is_enabled()) {
    log_debug(gc, verify)("StrDedup");
    G1StringDedup::verify();
  }

  if (failures) {
    log_error(gc, verify)("Heap after failed verification (kind %d):", vo);
    // It helps to have the per-region information in the output to
    // help us track down what went wrong. This is why we call
    // print_extended_on() instead of print_on().
    Log(gc, verify) log;
    ResourceMark rm;
    LogStream ls(log.error());
    _g1h->print_extended_on(&ls);
  }
  guarantee(!failures, "there should not have been any failures");
}

// Heap region set verification

class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet*   _old_set;
  HeapRegionSet*   _archive_set;
  HeapRegionSet*   _humongous_set;
  HeapRegionManager* _hrm;

public:
  uint _old_count;
  uint _archive_count;
  uint _humongous_count;
  uint _free_count;

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* archive_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionManager* hrm) :
    _old_set(old_set), _archive_set(archive_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _archive_count(), _humongous_count(), _free_count() { }

  bool do_heap_region(HeapRegion* hr) {
    if (hr->is_young()) {
      // TODO
    } else if (hr->is_humongous()) {
      assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
      _humongous_count++;
    } else if (hr->is_empty()) {
      assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
      _free_count++;
    } else if (hr->is_archive()) {
      assert(hr->containing_set() == _archive_set, "Heap region %u is archive but not in the archive set.", hr->hrm_index());
      _archive_count++;
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
      _old_count++;
    } else {
      // There are no other valid region types. Check for one invalid
      // one we can identify: pinned without old or humongous set.
      assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
      ShouldNotReachHere();
    }
    return false;
  }

  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
    guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
    guarantee(archive_set->length() == _archive_count, "Archive set count mismatch. Expected %u, actual %u.", archive_set->length(), _archive_count);
    guarantee(humongous_set->length() == _humongous_count, "Humongous set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
    guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
  }
};

void G1HeapVerifier::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _g1h->_hrm->verify();

  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.

  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
  _g1h->heap_region_iterate(&cl);
  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
}

void G1HeapVerifier::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || !UseTLAB) {
    _g1h->ensure_parsability(false);
  }
}

double G1HeapVerifier::verify(G1VerifyType type, VerifyOption vo, const char* msg) {
  double verify_time_ms = 0.0;

  if (should_verify(type) && _g1h->total_collections() >= VerifyGCStartAt) {
    double verify_start = os::elapsedTime();
    prepare_for_verify();
    Universe::verify(vo, msg);
    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  }

  return verify_time_ms;
}

void G1HeapVerifier::verify_before_gc(G1VerifyType type) {
  if (VerifyBeforeGC) {
    double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "Before GC");
    _g1h->phase_times()->record_verify_before_time_ms(verify_time_ms);
  }
}

void G1HeapVerifier::verify_after_gc(G1VerifyType type) {
  if (VerifyAfterGC) {
    double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "After GC");
    _g1h->phase_times()->record_verify_after_time_ms(verify_time_ms);
  }
}

#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
  G1HeapVerifier* _verifier;
public:
  G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
    : _verifier(verifier) { }
  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_survivor()) {
      _verifier->verify_dirty_region(r);
    } else {
      _verifier->verify_not_dirty_region(r);
    }
    return false;
  }
};

void G1HeapVerifier::verify_card_table_cleanup() {
  if (G1VerifyCTCleanup || VerifyAfterGC) {
    G1VerifyCardTableCleanup cleanup_verifier(this);
    _g1h->heap_region_iterate(&cleanup_verifier);
  }
}

void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
  // All of the region should be clean.
  G1CardTable* ct = _g1h->card_table();
  MemRegion mr(hr->bottom(), hr->end());
  ct->verify_not_dirty_region(mr);
}

void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
  // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  // dirty allocated blocks as they allocate them. The thread that
  // retires each region and replaces it with a new one will do a
  // maximal allocation to fill in [pre_dummy_top(),end()] but will
  // not dirty that area (one less thing to have to do while holding
  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  // is dirty.
  G1CardTable* ct = _g1h->card_table();
  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  if (hr->is_young()) {
    ct->verify_g1_young_region(mr);
  } else {
    ct->verify_dirty_region(mr);
  }
}
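
// Layout sketch for the comment above (addresses grow to the right):
//
//   bottom()                  pre_dummy_top()                end()
//     |== verified dirty ===========|== dummy fill, maybe clean ==|
//
// Only [bottom(), pre_dummy_top()) is checked, because the retiring thread
// fills, but deliberately does not dirty, the tail of the region.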

class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
private:
  G1HeapVerifier* _verifier;
public:
  G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
  virtual bool do_heap_region(HeapRegion* r) {
    _verifier->verify_dirty_region(r);
    return false;
  }
};

void G1HeapVerifier::verify_dirty_young_regions() {
  G1VerifyDirtyYoungListClosure cl(this);
  _g1h->collection_set()->iterate(&cl);
}

bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, const G1CMBitMap* const bitmap,
                                              HeapWord* tams, HeapWord* end) {
  guarantee(tams <= end,
            "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
  HeapWord* result = bitmap->get_next_marked_addr(tams, end);
  if (result < end) {
    log_error(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
    log_error(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
    return false;
  }
  return true;
}
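
// Invariant sketch: TAMS (top-at-mark-start) splits a region into a part
// the marking cycle covers and a part that is implicitly live:
//
//   bottom()                 tams                        end()
//     |-- mark bits allowed ---|-- mark bits forbidden ----|
//
// Objects at or above TAMS were allocated during marking and are treated
// as live without a bitmap mark, so any set bit found there is an error.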

bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
  const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prev_mark_bitmap();
  const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->next_mark_bitmap();

  HeapWord* ptams  = hr->prev_top_at_mark_start();
  HeapWord* ntams  = hr->next_top_at_mark_start();
  HeapWord* end    = hr->end();

  bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);

  bool res_n = true;
  // We cannot verify the next bitmap while we are about to clear it.
  if (!_g1h->collector_state()->clearing_next_bitmap()) {
    res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
  }
  if (!res_p || !res_n) {
    log_error(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
    log_error(gc, verify)("#### Caller: %s", caller);
    return false;
  }
  return true;
}

void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
  if (!G1VerifyBitmaps) {
    return;
  }

  guarantee(verify_bitmaps(caller, hr), "bitmap verification");
}

class G1VerifyBitmapClosure : public HeapRegionClosure {
private:
  const char* _caller;
  G1HeapVerifier* _verifier;
  bool _failures;

public:
  G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) :
    _caller(caller), _verifier(verifier), _failures(false) { }

  bool failures() { return _failures; }

  virtual bool do_heap_region(HeapRegion* hr) {
    bool result = _verifier->verify_bitmaps(_caller, hr);
    if (!result) {
      _failures = true;
    }
    return false;
  }
};

void G1HeapVerifier::check_bitmaps(const char* caller) {
  if (!G1VerifyBitmaps) {
    return;
  }

  G1VerifyBitmapClosure cl(caller, this);
  _g1h->heap_region_iterate(&cl);
  guarantee(!cl.failures(), "bitmap verification");
}

class G1CheckRegionAttrTableClosure : public HeapRegionClosure {
private:
  bool _failures;

public:
  G1CheckRegionAttrTableClosure() : HeapRegionClosure(), _failures(false) { }

  virtual bool do_heap_region(HeapRegion* hr) {
    uint i = hr->hrm_index();
    G1HeapRegionAttr region_attr = (G1HeapRegionAttr) G1CollectedHeap::heap()->_region_attr.get_by_index(i);
    if (hr->is_humongous()) {
      if (hr->in_collection_set()) {
        log_error(gc, verify)("## humongous region %u in CSet", i);
        _failures = true;
        return true;
      }
      if (region_attr.is_in_cset()) {
        log_error(gc, verify)("## inconsistent region attr type %s for humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (hr->is_continues_humongous() && region_attr.is_humongous()) {
        log_error(gc, verify)("## inconsistent region attr type %s for continues humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
    } else {
      if (region_attr.is_humongous()) {
        log_error(gc, verify)("## inconsistent region attr type %s for non-humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (hr->in_collection_set() != region_attr.is_in_cset()) {
        log_error(gc, verify)("## in CSet %d / region attr type %s inconsistency for region %u",
                              hr->in_collection_set(), region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (region_attr.is_in_cset()) {
        if (hr->is_archive()) {
          log_error(gc, verify)("## is_archive in collection set for region %u", i);
          _failures = true;
          return true;
        }
        if (hr->is_young() != (region_attr.is_young())) {
          log_error(gc, verify)("## is_young %d / region attr type %s inconsistency for region %u",
                                hr->is_young(), region_attr.get_type_str(), i);
          _failures = true;
          return true;
        }
        if (hr->is_old() != (region_attr.is_old())) {
          log_error(gc, verify)("## is_old %d / region attr type %s inconsistency for region %u",
                                hr->is_old(), region_attr.get_type_str(), i);
          _failures = true;
          return true;
        }
      }
    }
    return false;
  }

  bool failures() const { return _failures; }
};

bool G1HeapVerifier::check_region_attr_table() {
  G1CheckRegionAttrTableClosure cl;
  _g1h->_hrm->iterate(&cl);
  return !cl.failures();
}
#endif // PRODUCT