/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

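// Closure applied to root locations; reports and records roots that point
// to objects which are dead according to the selected verification option.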
class VerifyRootsClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyRootsClosure(VerifyOption vo) :
    _g1h(G1CollectedHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

  template <class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _vo)) {
        Log(gc, verify) log;
        log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
        if (_vo == VerifyOption_G1UseMarkWord) {
          log.error("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
        }
        ResourceMark rm;
        // Unconditional write?
        LogStream ls(log.error());
        obj->print_on(&ls);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

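// Verifies the oops embedded in nmethods: each oop is first checked for
// liveness via the root closure, and the nmethod must also be registered
// in the strong code roots of the region containing the referenced object.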
class G1VerifyCodeRootOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _root_cl;
  nmethod* _nm;
  VerifyOption _vo;
  bool _failures;

  template <class T> void do_oop_work(T* p) {
    // First verify that this root is live
    _root_cl->do_oop(p);

    if (!G1VerifyHeapRegionCodeRoots) {
      // We're not verifying the code roots attached to heap region.
      return;
    }

    // Don't check the code roots during marking verification in a full GC
    if (_vo == VerifyOption_G1UseMarkWord) {
      return;
    }

    // Now verify that the current nmethod (which contains p) is
    // in the code root list of the heap region containing the
    // object referenced by p.

    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Now fetch the region containing the object
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      HeapRegionRemSet* hrrs = hr->rem_set();
      // Verify that the strong code root list for this region
      // contains the nmethod
      if (!hrrs->strong_code_roots_list_contains(_nm)) {
        log_error(gc, verify)("Code root location " PTR_FORMAT " "
                              "from nmethod " PTR_FORMAT " not in strong "
                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
        _failures = true;
      }
    }
  }

public:
  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
    _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }

  void set_nmethod(nmethod* nm) { _nm = nm; }
  bool failures() { return _failures; }
};

class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  G1VerifyCodeRootOopClosure* _oop_cl;

public:
  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
    _oop_cl(oop_cl) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      _oop_cl->set_nmethod(nm);
      nm->oops_do(_oop_cl);
    }
  }
};

class YoungRefCounterClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  int              _count;
 public:
  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }

  int count() { return _count; }
  void reset_count() { _count = 0; }
};

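// For each klass, apply the given oop closure to its oops and additionally
// check that a klass which still holds references into the young generation
// has its "modified oops" flag set.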
class VerifyKlassClosure: public KlassClosure {
  YoungRefCounterClosure _young_ref_counter_closure;
  OopClosure *_oop_closure;
 public:
  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  void do_klass(Klass* k) {
    k->oops_do(_oop_closure);

    _young_ref_counter_closure.reset_count();
    k->oops_do(&_young_ref_counter_closure);
    if (_young_ref_counter_closure.count() > 0) {
      guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k));
    }
  }
};

class VerifyLivenessOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  VerifyOption _vo;
public:
  VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
    _g1h(g1h), _vo(vo)
  { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
              "Dead object referenced by a not dead object");
  }
};

class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;
  HeapRegion *_hr;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
    : _live_bytes(0), _hr(hr), _vo(vo) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h, _vo);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _vo)) {
      // If the object is alive according to the mark word,
      // then verify that the marking information agrees.
      // Note we can't verify the contra-positive of the
      // above: if the object is dead (according to the mark
      // word), it may not be marked, or may have been marked
      // but has since become dead, or may have been allocated
      // since the last marking.
      if (_vo == VerifyOption_G1UseMarkWord) {
        guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
      }

      o->oop_iterate_no_header(&isLive);
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();    // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};

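// Objects in archive regions may only reference other archive objects;
// the two closures below check exactly that.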
class VerifyArchiveOopClosure: public OopClosure {
public:
  VerifyArchiveOopClosure(HeapRegion *hr) { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj == NULL || G1ArchiveAllocator::is_archive_object(obj),
              "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
              p2i(p), p2i(obj));
  }
};

class VerifyArchiveRegionClosure: public ObjectClosure {
public:
  VerifyArchiveRegionClosure(HeapRegion *hr) { }
  // Verify that all object pointers are to archive regions.
  void do_object(oop o) {
    VerifyArchiveOopClosure checkOop(NULL);
    assert(o != NULL, "Should not be here for NULL oops");
    o->oop_iterate_no_header(&checkOop);
  }
};

class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool             _par;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyRegionClosure(bool par, VerifyOption vo)
    : _par(par),
      _vo(vo),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool doHeapRegion(HeapRegion* r) {
    // For archive regions, verify there are no heap pointers to
    // non-pinned regions. For all others, verify liveness info.
    if (r->is_archive()) {
      VerifyArchiveRegionClosure verify_oop_pointers(r);
      r->object_iterate(&verify_oop_pointers);
      return true;
    }
    if (!r->is_continues_humongous()) {
      bool failures = false;
      r->verify(_vo, &failures);
      if (failures) {
        _failures = true;
      } else if (!r->is_starts_humongous()) {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (_vo != VerifyOption_G1UseNextMarking) {
          if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
            log_error(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
            _failures = true;
          }
        } else {
          // When vo == UseNextMarking we cannot currently do a sanity
          // check on the live bytes as the calculation has not been
          // finalized yet.
        }
      }
    }
    return false; // continue the iteration; any failures are recorded and checked by the caller
  }
};

// This is the task used for parallel verification of the heap regions

class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap*  _g1h;
  VerifyOption      _vo;
  bool              _failures;
  HeapRegionClaimer _hrclaimer;

public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
      AbstractGangTask("Parallel verify task"),
      _g1h(g1h),
      _vo(vo),
      _failures(false),
      _hrclaimer(g1h->workers()->active_workers()) {}

  bool failures() {
    return _failures;
  }

  void work(uint worker_id) {
    HandleMark hm;
    VerifyRegionClosure blk(true, _vo);
    _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
    if (blk.failures()) {
      _failures = true;
    }
  }
};


void G1HeapVerifier::verify(VerifyOption vo) {
  if (!SafepointSynchronize::is_at_safepoint()) {
    log_info(gc, verify)("Skipping verification. Not at safepoint.");
  }

  assert(Thread::current()->is_VM_thread(),
         "Expected to be executed serially by the VM thread at this point");

  log_debug(gc, verify)("Roots");
  VerifyRootsClosure rootsCl(vo);
  VerifyKlassClosure klassCl(_g1h, &rootsCl);
  CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);

  // We apply the relevant closures to all the oops in the
  // system dictionary, class loader data graph, the string table
  // and the nmethods in the code cache.
  G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo);
  G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

  {
    G1RootProcessor root_processor(_g1h, 1);
    root_processor.process_all_roots(&rootsCl,
                                     &cldCl,
                                     &blobsCl);
  }

  bool failures = rootsCl.failures() || codeRootsCl.failures();

  if (vo != VerifyOption_G1UseMarkWord) {
    // If we're verifying during a full GC then the region sets
    // will have been torn down at the start of the GC. Therefore
    // verifying the region sets will fail. So we only verify
    // the region sets when not in a full GC.
    log_debug(gc, verify)("HeapRegionSets");
    verify_region_sets();
  }

  log_debug(gc, verify)("HeapRegions");
  if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {

    G1ParVerifyTask task(_g1h, vo);
    _g1h->workers()->run_task(&task);
    if (task.failures()) {
      failures = true;
    }

  } else {
    VerifyRegionClosure blk(false, vo);
    _g1h->heap_region_iterate(&blk);
    if (blk.failures()) {
      failures = true;
    }
  }

  if (G1StringDedup::is_enabled()) {
    log_debug(gc, verify)("StrDedup");
    G1StringDedup::verify();
  }

  if (failures) {
    log_error(gc, verify)("Heap after failed verification:");
    // It helps to have the per-region information in the output to
    // help us track down what went wrong. This is why we call
    // print_extended_on() instead of print_on().
    Log(gc, verify) log;
    ResourceMark rm;
    // Unconditional write?
    LogStream ls(log.error());
    _g1h->print_extended_on(&ls);
  }
  guarantee(!failures, "there should not have been any failures");
}

// Heap region set verification

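// Walks all heap regions and checks that each one is on the region set that
// matches its type (old, humongous, or free), counting the regions per set
// so that the set lengths can be cross-checked afterwards in verify_counts().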
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet*   _old_set;
  HeapRegionSet*   _humongous_set;
  HeapRegionManager*   _hrm;

public:
  uint _old_count;
  uint _humongous_count;
  uint _free_count;

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionManager* hrm) :
    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _humongous_count(), _free_count(){ }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_young()) {
      // TODO
    } else if (hr->is_humongous()) {
      assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
      _humongous_count++;
    } else if (hr->is_empty()) {
      assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
      _free_count++;
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
      _old_count++;
    } else {
      // There are no other valid region types. Check for one invalid
      // one we can identify: pinned without old or humongous set.
      assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
      ShouldNotReachHere();
    }
    return false;
  }

  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
    guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
    guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
    guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
  }
};

void G1HeapVerifier::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _g1h->_hrm.verify();
  {
    // Given that a concurrent operation might be adding regions to
    // the secondary free list we have to take the lock before
    // verifying it.
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    _g1h->_secondary_free_list.verify_list();
  }

  // If a concurrent region freeing operation is in progress it will
  // be difficult to correctly attribute any free regions we come
  // across to the correct free list given that they might belong to
  // one of several (free_list, secondary_free_list, any local lists,
  // etc.). So, if that's the case we will skip the rest of the
  // verification operation. Alternatively, waiting for the concurrent
  // operation to complete will have a non-trivial effect on the GC's
  // operation (no concurrent operation will last longer than the
  // interval between two calls to verification) and it might hide
  // any issues that we would like to catch during testing.
  if (_g1h->free_regions_coming()) {
    return;
  }

  // Make sure we append the secondary_free_list on the free_list so
  // that all free regions we will come across can be safely
  // attributed to the free_list.
  _g1h->append_secondary_free_list_if_not_empty_with_lock();

  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.

  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
  _g1h->heap_region_iterate(&cl);
  cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
}

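// Bring the heap into a state where it can be safely iterated for
// verification: make it parsable and let the remembered set prepare as well.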
void G1HeapVerifier::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    _g1h->ensure_parsability(false);
  }
  _g1h->g1_rem_set()->prepare_for_verify();
}

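// Run a full heap verification if 'guard' is set and at least VerifyGCStartAt
// collections have happened. Returns the time spent verifying, in milliseconds.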
double G1HeapVerifier::verify(bool guard, const char* msg) {
  double verify_time_ms = 0.0;

  if (guard && _g1h->total_collections() >= VerifyGCStartAt) {
    double verify_start = os::elapsedTime();
    HandleMark hm;  // Discard invalid handles created during verification
    prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  }

  return verify_time_ms;
}

void G1HeapVerifier::verify_before_gc() {
  double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
  _g1h->g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
}

void G1HeapVerifier::verify_after_gc() {
  double verify_time_ms = verify(VerifyAfterGC, "After GC");
  _g1h->g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
}


#ifndef PRODUCT
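// After card table cleanup only survivor regions are expected to still have
// dirty (young) cards; all other regions must be completely clean.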
class G1VerifyCardTableCleanup: public HeapRegionClosure {
  G1HeapVerifier* _verifier;
  G1SATBCardTableModRefBS* _ct_bs;
public:
  G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
    : _verifier(verifier), _ct_bs(ct_bs) { }
  virtual bool doHeapRegion(HeapRegion* r) {
    if (r->is_survivor()) {
      _verifier->verify_dirty_region(r);
    } else {
      _verifier->verify_not_dirty_region(r);
    }
    return false;
  }
};

void G1HeapVerifier::verify_card_table_cleanup() {
  if (G1VerifyCTCleanup || VerifyAfterGC) {
    G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
    _g1h->heap_region_iterate(&cleanup_verifier);
  }
}

void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
  // All of the region should be clean.
  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
  MemRegion mr(hr->bottom(), hr->end());
  ct_bs->verify_not_dirty_region(mr);
}

void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
  // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  // dirty allocated blocks as they allocate them. The thread that
  // retires each region and replaces it with a new one will do a
  // maximal allocation to fill in [pre_dummy_top(),end()] but will
  // not dirty that area (one less thing to have to do while holding
  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  // is dirty.
  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  if (hr->is_young()) {
    ct_bs->verify_g1_young_region(mr);
  } else {
    ct_bs->verify_dirty_region(mr);
  }
}

class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
private:
  G1HeapVerifier* _verifier;
public:
  G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
  virtual bool doHeapRegion(HeapRegion* r) {
    _verifier->verify_dirty_region(r);
    return false;
  }
};

void G1HeapVerifier::verify_dirty_young_regions() {
  G1VerifyDirtyYoungListClosure cl(this);
  _g1h->collection_set()->iterate(&cl);
}

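// Checks that the given bitmap has no bits set in the range [tams, end).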
bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, G1CMBitMapRO* bitmap,
                                              HeapWord* tams, HeapWord* end) {
  guarantee(tams <= end,
            "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
  HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
  if (result < end) {
    log_error(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
    log_error(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
    return false;
  }
  return true;
}

bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
  G1CMBitMapRO* prev_bitmap = _g1h->concurrent_mark()->prevMarkBitMap();
  G1CMBitMapRO* next_bitmap = (G1CMBitMapRO*) _g1h->concurrent_mark()->nextMarkBitMap();

  HeapWord* bottom = hr->bottom();
  HeapWord* ptams  = hr->prev_top_at_mark_start();
  HeapWord* ntams  = hr->next_top_at_mark_start();
  HeapWord* end    = hr->end();

  bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);

  bool res_n = true;
  // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
  // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
  // if we happen to be in that state.
  if (_g1h->collector_state()->mark_in_progress() || !_g1h->_cmThread->in_progress()) {
    res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
  }
  if (!res_p || !res_n) {
    log_error(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
    log_error(gc, verify)("#### Caller: %s", caller);
    return false;
  }
  return true;
}

void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
  if (!G1VerifyBitmaps) return;

  guarantee(verify_bitmaps(caller, hr), "bitmap verification");
}

class G1VerifyBitmapClosure : public HeapRegionClosure {
private:
  const char* _caller;
  G1HeapVerifier* _verifier;
  bool _failures;

public:
  G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) :
    _caller(caller), _verifier(verifier), _failures(false) { }

  bool failures() { return _failures; }

  virtual bool doHeapRegion(HeapRegion* hr) {
    bool result = _verifier->verify_bitmaps(_caller, hr);
    if (!result) {
      _failures = true;
    }
    return false;
  }
};

void G1HeapVerifier::check_bitmaps(const char* caller) {
  if (!G1VerifyBitmaps) return;

  G1VerifyBitmapClosure cl(caller, this);
  _g1h->heap_region_iterate(&cl);
  guarantee(!cl.failures(), "bitmap verification");
}

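// Cross-checks each region's collection set membership and region type
// against the in-cset fast-test table.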
class G1CheckCSetFastTableClosure : public HeapRegionClosure {
 private:
  bool _failures;
 public:
  G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }

  virtual bool doHeapRegion(HeapRegion* hr) {
    uint i = hr->hrm_index();
    InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
    if (hr->is_humongous()) {
      if (hr->in_collection_set()) {
        log_error(gc, verify)("## humongous region %u in CSet", i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (hr->is_continues_humongous() && cset_state.is_humongous()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
    } else {
      if (cset_state.is_humongous()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (hr->in_collection_set() != cset_state.is_in_cset()) {
        log_error(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                             hr->in_collection_set(), cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        if (hr->is_young() != (cset_state.is_young())) {
          log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                               hr->is_young(), cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->is_old() != (cset_state.is_old())) {
          log_error(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                               hr->is_old(), cset_state.value(), i);
          _failures = true;
          return true;
        }
      }
    }
    return false;
  }

  bool failures() const { return _failures; }
};

bool G1HeapVerifier::check_cset_fast_test() {
  G1CheckCSetFastTableClosure cl;
  _g1h->_hrm.iterate(&cl);
  return !cl.failures();
}
#endif // PRODUCT