/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

int G1HeapVerifier::_enabled_verification_types = G1HeapVerifier::G1VerifyAll;

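// Checks each root oop and reports roots that point to objects considered
// dead under the given verification option.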
class VerifyRootsClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  VerifyRootsClosure(VerifyOption vo) :
    _g1h(G1CollectedHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

  template <class T> void do_oop_nv(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _vo)) {
        Log(gc, verify) log;
        log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
        ResourceMark rm;
        LogStream ls(log.error());
        obj->print_on(&ls);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

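// For each oop embedded in an nmethod, first verifies the oop itself via the
// wrapped root closure, then checks that the nmethod is recorded in the
// strong code roots of the region containing the referenced object.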
class G1VerifyCodeRootOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _root_cl;
  nmethod* _nm;
  VerifyOption _vo;
  bool _failures;

  template <class T> void do_oop_work(T* p) {
    // First verify that this root is live
    _root_cl->do_oop(p);

    if (!G1VerifyHeapRegionCodeRoots) {
      // We're not verifying the code roots attached to heap regions.
      return;
    }

    // Don't check the code roots during marking verification in a full GC
    if (_vo == VerifyOption_G1UseFullMarking) {
      return;
    }

    // Now verify that the current nmethod (which contains p) is
    // in the code root list of the heap region containing the
    // object referenced by p.

    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);

      // Now fetch the region containing the object
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      HeapRegionRemSet* hrrs = hr->rem_set();
      // Verify that the strong code root list for this region
      // contains the nmethod
      if (!hrrs->strong_code_roots_list_contains(_nm)) {
        log_error(gc, verify)("Code root location " PTR_FORMAT " "
                              "from nmethod " PTR_FORMAT " not in strong "
                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
        _failures = true;
      }
    }
  }

public:
  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
    _g1h(g1h), _root_cl(root_cl), _nm(NULL), _vo(vo), _failures(false) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }

  void set_nmethod(nmethod* nm) { _nm = nm; }
  bool failures() { return _failures; }
};

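// Adapts G1VerifyCodeRootOopClosure to the code cache: for each nmethod it
// records the nmethod as the current one and then iterates over its oops.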
class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  G1VerifyCodeRootOopClosure* _oop_cl;

public:
  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
    _oop_cl(oop_cl) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      _oop_cl->set_nmethod(nm);
      nm->oops_do(_oop_cl);
    }
  }
};

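// Counts how many of the visited oops point into young regions.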
class YoungRefCounterClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  int              _count;
 public:
  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }

  int count() { return _count; }
  void reset_count() { _count = 0; }
};

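// Applies the given oop closure to all oops of a CLD and additionally checks
// that a CLD holding references into the young generation is marked as
// having modified oops.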
class VerifyCLDClosure: public CLDClosure {
  YoungRefCounterClosure _young_ref_counter_closure;
  OopClosure *_oop_closure;
 public:
  VerifyCLDClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  void do_cld(ClassLoaderData* cld) {
    cld->oops_do(_oop_closure, false);

    _young_ref_counter_closure.reset_count();
    cld->oops_do(&_young_ref_counter_closure, false);
    if (_young_ref_counter_closure.count() > 0) {
      guarantee(cld->has_modified_oops(), "CLD " PTR_FORMAT ", has young %d refs but is not dirty.", p2i(cld), _young_ref_counter_closure.count());
    }
  }
};

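// Checks that no visited oop references an object that is dead under the
// given verification option.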
class VerifyLivenessOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  VerifyOption _vo;
public:
  VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
    _g1h(g1h), _vo(vo)
  { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
              "Dead object referenced by a live object");
  }
};

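// Iterates over the objects in a region, verifying that live objects only
// reference live objects, and accumulates the number of live bytes.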
class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;
  HeapRegion *_hr;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
  VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
    : _live_bytes(0), _hr(hr), _vo(vo) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h, _vo);
    assert(o != NULL, "must not visit NULL oops");
    if (!_g1h->is_obj_dead_cond(o, _vo)) {
      // If the object is alive according to the full gc mark,
      // then verify that the marking information agrees.
      // Note we can't verify the contra-positive of the
      // above: if the object is dead (according to the mark
      // word), it may not be marked, or may have been marked
      // but has since become dead, or may have been allocated
      // since the last marking.
      if (_vo == VerifyOption_G1UseFullMarking) {
        guarantee(!_g1h->is_obj_dead(o), "Full GC marking and concurrent mark mismatch");
      }

      o->oop_iterate_no_header(&isLive);
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();    // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};

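// Checks that objects in an archive region reference only archive objects
// (closed-archive objects when the region is a closed archive region).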
class VerifyArchiveOopClosure: public OopClosure {
  HeapRegion* _hr;
public:
  VerifyArchiveOopClosure(HeapRegion *hr)
    : _hr(hr) { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);

    if (_hr->is_open_archive()) {
      guarantee(obj == NULL || G1ArchiveAllocator::is_archive_object(obj),
                "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                p2i(p), p2i(obj));
    } else {
      assert(_hr->is_closed_archive(), "should be closed archive region");
      guarantee(obj == NULL || G1ArchiveAllocator::is_closed_archive_object(obj),
                "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                p2i(p), p2i(obj));
    }
  }
};

class VerifyObjectInArchiveRegionClosure: public ObjectClosure {
  HeapRegion* _hr;
public:
  VerifyObjectInArchiveRegionClosure(HeapRegion *hr, bool verbose)
    : _hr(hr) { }
  // Verify that all object pointers are to archive regions.
  void do_object(oop o) {
    VerifyArchiveOopClosure checkOop(_hr);
    assert(o != NULL, "Should not be here for NULL oops");
    o->oop_iterate_no_header(&checkOop);
  }
};

// Should only be used at CDS dump time
class VerifyArchivePointerRegionClosure: public HeapRegionClosure {
private:
  G1CollectedHeap* _g1h;
public:
  VerifyArchivePointerRegionClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_archive()) {
      VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
      r->object_iterate(&verify_oop_pointers);
    }
    return false;
  }
};

void G1HeapVerifier::verify_archive_regions() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  VerifyArchivePointerRegionClosure cl(g1h);
  g1h->heap_region_iterate(&cl);
}

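// Per-region verification: checks the remembered set state, the contents of
// archive regions and the liveness information of all other regions.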
class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool             _par;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  VerifyRegionClosure(bool par, VerifyOption vo)
    : _par(par),
      _vo(vo),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool do_heap_region(HeapRegion* r) {
    guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
    // Humongous and old regions might be of any state, so can't check here.
    guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
    // For archive regions, verify there are no heap pointers to
    // non-pinned regions. For all others, verify liveness info.
    if (r->is_closed_archive()) {
      VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
      r->object_iterate(&verify_oop_pointers);
      return true;
    } else if (r->is_open_archive()) {
      VerifyObjsInRegionClosure verify_open_archive_oop(r, _vo);
      r->object_iterate(&verify_open_archive_oop);
      return true;
    } else if (!r->is_continues_humongous()) {
      bool failures = false;
      r->verify(_vo, &failures);
      if (failures) {
        _failures = true;
      } else if (!r->is_starts_humongous()) {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (_vo != VerifyOption_G1UseNextMarking) {
          if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
            log_error(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
            _failures = true;
          }
        } else {
          // When vo == UseNextMarking we cannot currently do a sanity
          // check on the live bytes as the calculation has not been
          // finalized yet.
        }
      }
    }
    return false; // Do not abort the iteration; keep verifying the remaining regions even after a failure.
  }
};


// This is the task used for parallel verification of the heap regions

class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap*  _g1h;
  VerifyOption      _vo;
  bool              _failures;
  HeapRegionClaimer _hrclaimer;

public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
      AbstractGangTask("Parallel verify task"),
      _g1h(g1h),
      _vo(vo),
      _failures(false),
      _hrclaimer(g1h->workers()->active_workers()) {}

  bool failures() {
    return _failures;
  }

  void work(uint worker_id) {
    HandleMark hm;
    VerifyRegionClosure blk(true, _vo);
    _g1h->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
    if (blk.failures()) {
      _failures = true;
    }
  }
};

void G1HeapVerifier::enable_verification_type(G1VerifyType type) {
  // First enable will clear _enabled_verification_types.
  if (_enabled_verification_types == G1VerifyAll) {
    _enabled_verification_types = type;
  } else {
    _enabled_verification_types |= type;
  }
}

bool G1HeapVerifier::should_verify(G1VerifyType type) {
  return (_enabled_verification_types & type) == type;
}

void G1HeapVerifier::verify(VerifyOption vo) {
  if (!SafepointSynchronize::is_at_safepoint()) {
    log_info(gc, verify)("Skipping verification. Not at safepoint.");
    return;
  }

  assert(Thread::current()->is_VM_thread(),
         "Expected to be executed serially by the VM thread at this point");

  log_debug(gc, verify)("Roots");
  VerifyRootsClosure rootsCl(vo);
  VerifyCLDClosure cldCl(_g1h, &rootsCl);

  // We apply the relevant closures to all the oops in the
  // system dictionary, class loader data graph, the string table
  // and the nmethods in the code cache.
  G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo);
  G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

  {
    G1RootProcessor root_processor(_g1h, 1);
    root_processor.process_all_roots(&rootsCl,
                                     &cldCl,
                                     &blobsCl);
  }

  bool failures = rootsCl.failures() || codeRootsCl.failures();

  if (!_g1h->g1_policy()->collector_state()->in_full_gc()) {
    // If we're verifying during a full GC then the region sets
    // will have been torn down at the start of the GC. Therefore
    // verifying the region sets will fail. So we only verify
    // the region sets when not in a full GC.
    log_debug(gc, verify)("HeapRegionSets");
    verify_region_sets();
  }

  log_debug(gc, verify)("HeapRegions");
  if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {

    G1ParVerifyTask task(_g1h, vo);
    _g1h->workers()->run_task(&task);
    if (task.failures()) {
      failures = true;
    }

  } else {
    VerifyRegionClosure blk(false, vo);
    _g1h->heap_region_iterate(&blk);
    if (blk.failures()) {
      failures = true;
    }
  }

  if (G1StringDedup::is_enabled()) {
    log_debug(gc, verify)("StrDedup");
    G1StringDedup::verify();
  }

  if (failures) {
    log_error(gc, verify)("Heap after failed verification (kind %d):", vo);
    // It helps to have the per-region information in the output to
    // help us track down what went wrong. This is why we call
    // print_extended_on() instead of print_on().
    Log(gc, verify) log;
    ResourceMark rm;
    LogStream ls(log.error());
    _g1h->print_extended_on(&ls);
  }
  guarantee(!failures, "there should not have been any failures");
}

// Heap region set verification

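// Checks that every region is on the region set matching its type and counts
// the regions per type so the totals can be compared against the set lengths.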
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet*   _old_set;
  HeapRegionSet*   _humongous_set;
  HeapRegionManager*   _hrm;

public:
  uint _old_count;
  uint _humongous_count;
  uint _free_count;

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionManager* hrm) :
    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _humongous_count(), _free_count() { }

  bool do_heap_region(HeapRegion* hr) {
    if (hr->is_young()) {
      // TODO
    } else if (hr->is_humongous()) {
      assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
      _humongous_count++;
    } else if (hr->is_empty()) {
      assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
      _free_count++;
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
      _old_count++;
    } else {
      // There are no other valid region types. Check for one invalid
      // one we can identify: pinned without old or humongous set.
      assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
      ShouldNotReachHere();
    }
    return false;
  }

  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
    guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
    guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
    guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
  }
};

void G1HeapVerifier::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _g1h->_hrm.verify();

  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.

  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
  _g1h->heap_region_iterate(&cl);
  cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
}

void G1HeapVerifier::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || !UseTLAB) {
    _g1h->ensure_parsability(false);
  }
}

double G1HeapVerifier::verify(G1VerifyType type, VerifyOption vo, const char* msg) {
  double verify_time_ms = 0.0;

  if (should_verify(type) && _g1h->total_collections() >= VerifyGCStartAt) {
    double verify_start = os::elapsedTime();
    HandleMark hm;  // Discard invalid handles created during verification
    prepare_for_verify();
    Universe::verify(vo, msg);
    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  }

  return verify_time_ms;
}

void G1HeapVerifier::verify_before_gc(G1VerifyType type) {
  if (VerifyBeforeGC) {
    double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "Before GC");
    _g1h->g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
  }
}

void G1HeapVerifier::verify_after_gc(G1VerifyType type) {
  if (VerifyAfterGC) {
    double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "After GC");
    _g1h->g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
  }
}


#ifndef PRODUCT
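// After card table cleanup, survivor regions must still have their cards
// marked young/dirty while all other regions must be clean.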
class G1VerifyCardTableCleanup: public HeapRegionClosure {
  G1HeapVerifier* _verifier;
public:
  G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
    : _verifier(verifier) { }
  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_survivor()) {
      _verifier->verify_dirty_region(r);
    } else {
      _verifier->verify_not_dirty_region(r);
    }
    return false;
  }
};

void G1HeapVerifier::verify_card_table_cleanup() {
  if (G1VerifyCTCleanup || VerifyAfterGC) {
    G1VerifyCardTableCleanup cleanup_verifier(this);
    _g1h->heap_region_iterate(&cleanup_verifier);
  }
}

void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
  // All of the region should be clean.
  G1CardTable* ct = _g1h->card_table();
  MemRegion mr(hr->bottom(), hr->end());
  ct->verify_not_dirty_region(mr);
}

void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
  // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  // dirty allocated blocks as they allocate them. The thread that
  // retires each region and replaces it with a new one will do a
  // maximal allocation to fill in [pre_dummy_top(),end()] but will
  // not dirty that area (one less thing to have to do while holding
  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  // is dirty.
  G1CardTable* ct = _g1h->card_table();
  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  if (hr->is_young()) {
    ct->verify_g1_young_region(mr);
  } else {
    ct->verify_dirty_region(mr);
  }
}

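// Checks that the cards of every region in the collection set are correctly
// marked young/dirty.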
class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
private:
  G1HeapVerifier* _verifier;
public:
  G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
  virtual bool do_heap_region(HeapRegion* r) {
    _verifier->verify_dirty_region(r);
    return false;
  }
};

void G1HeapVerifier::verify_dirty_young_regions() {
  G1VerifyDirtyYoungListClosure cl(this);
  _g1h->collection_set()->iterate(&cl);
}

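// Returns true if the given bitmap has no bit set in the range [tams, end).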
bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, const G1CMBitMap* const bitmap,
                                              HeapWord* tams, HeapWord* end) {
  guarantee(tams <= end,
            "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
  HeapWord* result = bitmap->get_next_marked_addr(tams, end);
  if (result < end) {
    log_error(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
    log_error(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
    return false;
  }
  return true;
}

bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
  const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prev_mark_bitmap();
  const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->next_mark_bitmap();

  HeapWord* ptams  = hr->prev_top_at_mark_start();
  HeapWord* ntams  = hr->next_top_at_mark_start();
  HeapWord* end    = hr->end();

  bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);

  bool res_n = true;
  // We cannot verify the next bitmap while we are about to clear it.
  if (!_g1h->collector_state()->clearing_next_bitmap()) {
    res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
  }
  if (!res_p || !res_n) {
    log_error(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
    log_error(gc, verify)("#### Caller: %s", caller);
    return false;
  }
  return true;
}

void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
  if (!G1VerifyBitmaps) {
    return;
  }

  guarantee(verify_bitmaps(caller, hr), "bitmap verification");
}

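// Applies bitmap verification to every heap region, recording whether any
// region failed.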
class G1VerifyBitmapClosure : public HeapRegionClosure {
private:
  const char* _caller;
  G1HeapVerifier* _verifier;
  bool _failures;

public:
  G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) :
    _caller(caller), _verifier(verifier), _failures(false) { }

  bool failures() { return _failures; }

  virtual bool do_heap_region(HeapRegion* hr) {
    bool result = _verifier->verify_bitmaps(_caller, hr);
    if (!result) {
      _failures = true;
    }
    return false;
  }
};

void G1HeapVerifier::check_bitmaps(const char* caller) {
  if (!G1VerifyBitmaps) {
    return;
  }

  G1VerifyBitmapClosure cl(caller, this);
  _g1h->heap_region_iterate(&cl);
  guarantee(!cl.failures(), "bitmap verification");
}

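// Cross-checks the per-region in-cset fast test entries against the actual
// attributes of each region.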
class G1CheckCSetFastTableClosure : public HeapRegionClosure {
 private:
  bool _failures;
 public:
  G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }

  virtual bool do_heap_region(HeapRegion* hr) {
    uint i = hr->hrm_index();
    InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
    if (hr->is_humongous()) {
      if (hr->in_collection_set()) {
        log_error(gc, verify)("## humongous region %u in CSet", i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (hr->is_continues_humongous() && cset_state.is_humongous()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
    } else {
      if (cset_state.is_humongous()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (hr->in_collection_set() != cset_state.is_in_cset()) {
        log_error(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                             hr->in_collection_set(), cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        if (hr->is_young() != (cset_state.is_young())) {
          log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                               hr->is_young(), cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->is_old() != (cset_state.is_old())) {
          log_error(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                               hr->is_old(), cset_state.value(), i);
          _failures = true;
          return true;
        }
      }
    }
    return false;
  }

  bool failures() const { return _failures; }
};

bool G1HeapVerifier::check_cset_fast_test() {
  G1CheckCSetFastTableClosure cl;
  _g1h->_hrm.iterate(&cl);
  return !cl.failures();
}
#endif // PRODUCT