/*
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

int G1HeapVerifier::_enabled_verification_types = G1HeapVerifier::G1VerifyAll;

class VerifyRootsClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  VerifyRootsClosure(VerifyOption vo) :
    _g1h(G1CollectedHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

  template <class T> void do_oop_work(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _vo)) {
        Log(gc, verify) log;
        log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
        ResourceMark rm;
        LogStream ls(log.error());
        obj->print_on(&ls);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class G1VerifyCodeRootOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _root_cl;
  nmethod* _nm;
  VerifyOption _vo;
  bool _failures;

  template <class T> void do_oop_work(T* p) {
    // First verify that this root is live
    _root_cl->do_oop(p);

    if (!G1VerifyHeapRegionCodeRoots) {
      // We're not verifying the code roots attached to heap regions.
      return;
    }

    // Don't check the code roots during marking verification in a full GC
    if (_vo == VerifyOption_G1UseFullMarking) {
      return;
    }

    // Now verify that the current nmethod (which contains p) is
    // in the code root list of the heap region containing the
    // object referenced by p.

    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);

      // Now fetch the region containing the object
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      HeapRegionRemSet* hrrs = hr->rem_set();
      // Verify that the strong code root list for this region
      // contains the nmethod
      if (!hrrs->strong_code_roots_list_contains(_nm)) {
        log_error(gc, verify)("Code root location " PTR_FORMAT " "
                              "from nmethod " PTR_FORMAT " not in strong "
                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
        _failures = true;
      }
    }
  }

public:
  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
    _g1h(g1h), _root_cl(root_cl), _nm(NULL), _vo(vo), _failures(false) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }

  void set_nmethod(nmethod* nm) { _nm = nm; }
  bool failures() { return _failures; }
};

class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  G1VerifyCodeRootOopClosure* _oop_cl;

public:
  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
    _oop_cl(oop_cl) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      _oop_cl->set_nmethod(nm);
      nm->oops_do(_oop_cl);
    }
  }
};

class YoungRefCounterClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  int              _count;
public:
  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }

  int count() { return _count; }
  void reset_count() { _count = 0; }
};

class VerifyCLDClosure: public CLDClosure {
  YoungRefCounterClosure _young_ref_counter_closure;
  OopClosure*            _oop_closure;
public:
  VerifyCLDClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  void do_cld(ClassLoaderData* cld) {
    cld->oops_do(_oop_closure, ClassLoaderData::_claim_none);

    _young_ref_counter_closure.reset_count();
    cld->oops_do(&_young_ref_counter_closure, ClassLoaderData::_claim_none);
    if (_young_ref_counter_closure.count() > 0) {
      guarantee(cld->has_modified_oops(), "CLD " PTR_FORMAT " has young %d refs but is not dirty.", p2i(cld), _young_ref_counter_closure.count());
    }
  }
};

class VerifyLivenessOopClosure: public BasicOopIterateClosure {
  G1CollectedHeap* _g1h;
  VerifyOption _vo;
public:
  VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
    _g1h(g1h), _vo(vo)
  { }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    oop obj = RawAccess<>::oop_load(p);
    guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
              "Dead object referenced by a not dead object");
  }
};

class VerifyObjsInRegionClosure: public ObjectClosure {
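  // Walks the objects in a single region: for each live object it verifies
  // that every reference points to a live object, and it accumulates the
  // live bytes found so callers can cross-check the region's liveness
  // accounting.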
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;
  HeapRegion* _hr;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
  VerifyObjsInRegionClosure(HeapRegion* hr, VerifyOption vo)
    : _live_bytes(0), _hr(hr), _vo(vo) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h, _vo);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _vo)) {
      // If the object is alive according to the full gc mark,
      // then verify that the marking information agrees.
      // Note we can't verify the contra-positive of the
      // above: if the object is dead (according to the mark
      // word), it may not be marked, or may have been marked
      // but has since become dead, or may have been allocated
      // since the last marking.
      if (_vo == VerifyOption_G1UseFullMarking) {
        guarantee(!_g1h->is_obj_dead(o), "Full GC marking and concurrent mark mismatch");
      }

      o->oop_iterate(&isLive);
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();    // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};

class VerifyArchiveOopClosure: public BasicOopIterateClosure {
  HeapRegion* _hr;
public:
  VerifyArchiveOopClosure(HeapRegion* hr)
    : _hr(hr) { }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    oop obj = RawAccess<>::oop_load(p);

    if (_hr->is_open_archive()) {
      guarantee(obj == NULL || G1ArchiveAllocator::is_archived_object(obj),
                "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                p2i(p), p2i(obj));
    } else {
      assert(_hr->is_closed_archive(), "should be closed archive region");
      guarantee(obj == NULL || G1ArchiveAllocator::is_closed_archive_object(obj),
                "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                p2i(p), p2i(obj));
    }
  }
};

class VerifyObjectInArchiveRegionClosure: public ObjectClosure {
  HeapRegion* _hr;
public:
  VerifyObjectInArchiveRegionClosure(HeapRegion* hr, bool verbose)
    : _hr(hr) { }
  // Verify that all object pointers are to archive regions.
  void do_object(oop o) {
    VerifyArchiveOopClosure checkOop(_hr);
    assert(o != NULL, "Should not be here for NULL oops");
    o->oop_iterate(&checkOop);
  }
};

// Should only be used at CDS dump time
class VerifyReadyForArchivingRegionClosure : public HeapRegionClosure {
  bool _seen_free;
  bool _has_holes;
  bool _has_unexpected_holes;
  bool _has_humongous;
public:
  bool has_holes() { return _has_holes; }
  bool has_unexpected_holes() { return _has_unexpected_holes; }
  bool has_humongous() { return _has_humongous; }

  VerifyReadyForArchivingRegionClosure() : HeapRegionClosure() {
    _seen_free = false;
    _has_holes = false;
    _has_unexpected_holes = false;
    _has_humongous = false;
  }
  virtual bool do_heap_region(HeapRegion* hr) {
    const char* hole = "";

    if (hr->is_free()) {
      _seen_free = true;
    } else {
      if (_seen_free) {
        _has_holes = true;
        if (hr->is_humongous()) {
          hole = " hole";
        } else {
          _has_unexpected_holes = true;
          hole = " hole **** unexpected ****";
        }
      }
    }
    if (hr->is_humongous()) {
      _has_humongous = true;
    }
    log_info(gc, region, cds)("HeapRegion " INTPTR_FORMAT " %s%s", p2i(hr->bottom()), hr->get_type_str(), hole);
    return false;
  }
};

// We want all used regions to be moved to the bottom-end of the heap, so we have
// a contiguous range of free regions at the top end of the heap. This way, we can
// avoid fragmentation while allocating the archive regions.
//
// Before calling this, a full GC should have been executed with a single worker thread,
// so that no old regions would be moved to the middle of the heap.
void G1HeapVerifier::verify_ready_for_archiving() {
  VerifyReadyForArchivingRegionClosure cl;
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  if (cl.has_holes()) {
    log_warning(gc, verify)("All free regions should be at the top end of the heap, but"
                            " we found holes. This is probably caused by (unmovable) humongous"
                            " allocations or active GCLocker, and may lead to fragmentation while"
                            " writing archive heap memory regions.");
  }
  if (cl.has_humongous()) {
    log_warning(gc, verify)("(Unmovable) humongous regions have been found and"
                            " may lead to fragmentation while"
                            " writing archive heap memory regions.");
  }
}

class VerifyArchivePointerRegionClosure: public HeapRegionClosure {
  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_archive()) {
      VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
      r->object_iterate(&verify_oop_pointers);
    }
    return false;
  }
};

void G1HeapVerifier::verify_archive_regions() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  VerifyArchivePointerRegionClosure cl;
  g1h->heap_region_iterate(&cl);
}

class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool _par;
  VerifyOption _vo;
  bool _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  VerifyRegionClosure(bool par, VerifyOption vo)
    : _par(par),
      _vo(vo),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool do_heap_region(HeapRegion* r) {
    guarantee(!r->has_index_in_opt_cset(), "Region %u still has opt collection set index %u", r->hrm_index(), r->index_in_opt_cset());
    guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
    // Humongous and old regions might be of any state, so can't check here.
    guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
    // Verify that the continues humongous regions' remembered set state matches the
    // one from the starts humongous region.
    if (r->is_continues_humongous()) {
      if (r->rem_set()->get_state_str() != r->humongous_start_region()->rem_set()->get_state_str()) {
        log_error(gc, verify)("Remset states differ: Region %u (%s) remset %s with starts region %u (%s) remset %s",
                              r->hrm_index(),
                              r->get_short_type_str(),
                              r->rem_set()->get_state_str(),
                              r->humongous_start_region()->hrm_index(),
                              r->humongous_start_region()->get_short_type_str(),
                              r->humongous_start_region()->rem_set()->get_state_str());
        _failures = true;
      }
    }
    // For archive regions, verify there are no heap pointers to
    // non-pinned regions. For all others, verify liveness info.
    if (r->is_closed_archive()) {
      VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
      r->object_iterate(&verify_oop_pointers);
      return true;
    } else if (r->is_open_archive()) {
      VerifyObjsInRegionClosure verify_open_archive_oop(r, _vo);
      r->object_iterate(&verify_open_archive_oop);
      return true;
    } else if (!r->is_continues_humongous()) {
      bool failures = false;
      r->verify(_vo, &failures);
      if (failures) {
        _failures = true;
      } else if (!r->is_starts_humongous()) {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (_vo != VerifyOption_G1UseNextMarking) {
          if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
            log_error(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
            _failures = true;
          }
        } else {
          // When vo == UseNextMarking we cannot currently do a sanity
          // check on the live bytes as the calculation has not been
          // finalized yet.
        }
      }
    }
    return false; // Continue the region iteration; any failures have been recorded in _failures.
  }
};

// This is the task used for parallel verification of the heap regions

class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap* _g1h;
  VerifyOption _vo;
  bool _failures;
  HeapRegionClaimer _hrclaimer;

public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
      AbstractGangTask("Parallel verify task"),
      _g1h(g1h),
      _vo(vo),
      _failures(false),
      _hrclaimer(g1h->workers()->active_workers()) {}

  bool failures() {
    return _failures;
  }

  void work(uint worker_id) {
    VerifyRegionClosure blk(true, _vo);
    _g1h->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
    if (blk.failures()) {
      _failures = true;
    }
  }
};

void G1HeapVerifier::enable_verification_type(G1VerifyType type) {
  // First enable will clear _enabled_verification_types.
  if (_enabled_verification_types == G1VerifyAll) {
    _enabled_verification_types = type;
  } else {
    _enabled_verification_types |= type;
  }
}

bool G1HeapVerifier::should_verify(G1VerifyType type) {
  return (_enabled_verification_types & type) == type;
}

void G1HeapVerifier::verify(VerifyOption vo) {
  if (!SafepointSynchronize::is_at_safepoint()) {
    log_info(gc, verify)("Skipping verification. Not at safepoint.");
  }

  assert(Thread::current()->is_VM_thread(),
         "Expected to be executed serially by the VM thread at this point");

  log_debug(gc, verify)("Roots");
  VerifyRootsClosure rootsCl(vo);
  VerifyCLDClosure cldCl(_g1h, &rootsCl);

  // We apply the relevant closures to all the oops in the
  // system dictionary, class loader data graph, the string table
  // and the nmethods in the code cache.
  G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo);
  G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

  {
    G1RootProcessor root_processor(_g1h, 1);
    root_processor.process_all_roots(&rootsCl, &cldCl, &blobsCl);
  }

  bool failures = rootsCl.failures() || codeRootsCl.failures();

  if (!_g1h->policy()->collector_state()->in_full_gc()) {
    // If we're verifying during a full GC then the region sets
    // will have been torn down at the start of the GC. Therefore
    // verifying the region sets will fail. So we only verify
    // the region sets when not in a full GC.
    log_debug(gc, verify)("HeapRegionSets");
    verify_region_sets();
  }

  log_debug(gc, verify)("HeapRegions");
  if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
    G1ParVerifyTask task(_g1h, vo);
    _g1h->workers()->run_task(&task);
    if (task.failures()) {
      failures = true;
    }
  } else {
    VerifyRegionClosure blk(false, vo);
    _g1h->heap_region_iterate(&blk);
    if (blk.failures()) {
      failures = true;
    }
  }

  if (G1StringDedup::is_enabled()) {
    log_debug(gc, verify)("StrDedup");
    G1StringDedup::verify();
  }

  if (failures) {
    log_error(gc, verify)("Heap after failed verification (kind %d):", vo);
    // It helps to have the per-region information in the output to
    // help us track down what went wrong. This is why we call
    // print_extended_on() instead of print_on().
    Log(gc, verify) log;
    ResourceMark rm;
    LogStream ls(log.error());
    _g1h->print_extended_on(&ls);
  }
  guarantee(!failures, "there should not have been any failures");
}

// Heap region set verification

class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet* _old_set;
  HeapRegionSet* _archive_set;
  HeapRegionSet* _humongous_set;
  HeapRegionManager* _hrm;

public:
  uint _old_count;
  uint _archive_count;
  uint _humongous_count;
  uint _free_count;

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* archive_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionManager* hrm) :
    _old_set(old_set), _archive_set(archive_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _archive_count(), _humongous_count(), _free_count() { }

  bool do_heap_region(HeapRegion* hr) {
    if (hr->is_young()) {
      // TODO
    } else if (hr->is_humongous()) {
      assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
      _humongous_count++;
    } else if (hr->is_empty()) {
      assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
      _free_count++;
    } else if (hr->is_archive()) {
      assert(hr->containing_set() == _archive_set, "Heap region %u is archive but not in the archive set.", hr->hrm_index());
      _archive_count++;
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
      _old_count++;
    } else {
      // There are no other valid region types. Check for one invalid
      // one we can identify: pinned without old or humongous set.
      assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
      ShouldNotReachHere();
    }
    return false;
  }

  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
    guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
    guarantee(archive_set->length() == _archive_count, "Archive set count mismatch. Expected %u, actual %u.", archive_set->length(), _archive_count);
    guarantee(humongous_set->length() == _humongous_count, "Humongous set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
    guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
  }
};

void G1HeapVerifier::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _g1h->_hrm->verify();

  // Then make sure that the region accounting in the lists is
  // consistent with what we see in the heap.
  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
  _g1h->heap_region_iterate(&cl);
  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
}

void G1HeapVerifier::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || !UseTLAB) {
    // Make the heap parsable by filling unused TLAB space (without
    // retiring the TLABs).
    _g1h->ensure_parsability(false);
  }
}

double G1HeapVerifier::verify(G1VerifyType type, VerifyOption vo, const char* msg) {
  double verify_time_ms = 0.0;

  if (should_verify(type) && _g1h->total_collections() >= VerifyGCStartAt) {
    double verify_start = os::elapsedTime();
    prepare_for_verify();
    Universe::verify(vo, msg);
    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  }

  return verify_time_ms;
}

void G1HeapVerifier::verify_before_gc(G1VerifyType type) {
  if (VerifyBeforeGC) {
    double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "Before GC");
    _g1h->phase_times()->record_verify_before_time_ms(verify_time_ms);
  }
}

void G1HeapVerifier::verify_after_gc(G1VerifyType type) {
  if (VerifyAfterGC) {
    double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "After GC");
    _g1h->phase_times()->record_verify_after_time_ms(verify_time_ms);
  }
}

#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
  G1HeapVerifier* _verifier;
public:
  G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
    : _verifier(verifier) { }
  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_survivor()) {
      _verifier->verify_dirty_region(r);
    } else {
      _verifier->verify_not_dirty_region(r);
    }
    return false;
  }
};

void G1HeapVerifier::verify_card_table_cleanup() {
  if (G1VerifyCTCleanup || VerifyAfterGC) {
    G1VerifyCardTableCleanup cleanup_verifier(this);
    _g1h->heap_region_iterate(&cleanup_verifier);
  }
}

void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
  // All of the region should be clean.
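  // (Survivor regions are the exception: G1VerifyCardTableCleanup above
  // routes them to verify_dirty_region() instead, since their cards are
  // expected to carry young-gen / dirty values.)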
  G1CardTable* ct = _g1h->card_table();
  MemRegion mr(hr->bottom(), hr->end());
  ct->verify_not_dirty_region(mr);
}

void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
  // We cannot guarantee that [bottom(),end()] is dirty. Threads
  // dirty allocated blocks as they allocate them. The thread that
  // retires each region and replaces it with a new one will do a
  // maximal allocation to fill in [pre_dummy_top(),end()] but will
  // not dirty that area (one less thing to have to do while holding
  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  // is dirty.
  G1CardTable* ct = _g1h->card_table();
  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  if (hr->is_young()) {
    ct->verify_g1_young_region(mr);
  } else {
    ct->verify_dirty_region(mr);
  }
}

class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
private:
  G1HeapVerifier* _verifier;
public:
  G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
  virtual bool do_heap_region(HeapRegion* r) {
    _verifier->verify_dirty_region(r);
    return false;
  }
};

void G1HeapVerifier::verify_dirty_young_regions() {
  G1VerifyDirtyYoungListClosure cl(this);
  _g1h->collection_set()->iterate(&cl);
}

bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, const G1CMBitMap* const bitmap,
                                              HeapWord* tams, HeapWord* end) {
  guarantee(tams <= end,
            "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
  HeapWord* result = bitmap->get_next_marked_addr(tams, end);
  if (result < end) {
    log_error(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
    log_error(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
    return false;
  }
  return true;
}

bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
  const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prev_mark_bitmap();
  const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->next_mark_bitmap();

  HeapWord* ptams = hr->prev_top_at_mark_start();
  HeapWord* ntams = hr->next_top_at_mark_start();
  HeapWord* end   = hr->end();

  bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);

  bool res_n = true;
  // We cannot verify the next bitmap while we are about to clear it.
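  // (While the next bitmap is being cleared, stale mark bits above ntams are
  // expected, so the check is skipped for that window.)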
  if (!_g1h->collector_state()->clearing_next_bitmap()) {
    res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
  }
  if (!res_p || !res_n) {
    log_error(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
    log_error(gc, verify)("#### Caller: %s", caller);
    return false;
  }
  return true;
}

void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
  if (!G1VerifyBitmaps) {
    return;
  }

  guarantee(verify_bitmaps(caller, hr), "bitmap verification");
}

class G1VerifyBitmapClosure : public HeapRegionClosure {
private:
  const char* _caller;
  G1HeapVerifier* _verifier;
  bool _failures;

public:
  G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) :
    _caller(caller), _verifier(verifier), _failures(false) { }

  bool failures() { return _failures; }

  virtual bool do_heap_region(HeapRegion* hr) {
    bool result = _verifier->verify_bitmaps(_caller, hr);
    if (!result) {
      _failures = true;
    }
    return false;
  }
};

void G1HeapVerifier::check_bitmaps(const char* caller) {
  if (!G1VerifyBitmaps) {
    return;
  }

  G1VerifyBitmapClosure cl(caller, this);
  _g1h->heap_region_iterate(&cl);
  guarantee(!cl.failures(), "bitmap verification");
}

class G1CheckRegionAttrTableClosure : public HeapRegionClosure {
private:
  bool _failures;

public:
  G1CheckRegionAttrTableClosure() : HeapRegionClosure(), _failures(false) { }

  virtual bool do_heap_region(HeapRegion* hr) {
    uint i = hr->hrm_index();
    G1HeapRegionAttr region_attr = (G1HeapRegionAttr) G1CollectedHeap::heap()->_region_attr.get_by_index(i);
    if (hr->is_humongous()) {
      if (hr->in_collection_set()) {
        log_error(gc, verify)("## humongous region %u in CSet", i);
        _failures = true;
        return true;
      }
      if (region_attr.is_in_cset()) {
        log_error(gc, verify)("## inconsistent region attr type %s for humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (hr->is_continues_humongous() && region_attr.is_humongous()) {
        log_error(gc, verify)("## inconsistent region attr type %s for continues humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
    } else {
      if (region_attr.is_humongous()) {
        log_error(gc, verify)("## inconsistent region attr type %s for non-humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (hr->in_collection_set() != region_attr.is_in_cset()) {
        log_error(gc, verify)("## in CSet %d / region attr type %s inconsistency for region %u",
                              hr->in_collection_set(), region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (region_attr.is_in_cset()) {
        if (hr->is_archive()) {
          log_error(gc, verify)("## is_archive in collection set for region %u", i);
          _failures = true;
          return true;
        }
        if (hr->is_young() != (region_attr.is_young())) {
          log_error(gc, verify)("## is_young %d / region attr type %s inconsistency for region %u",
                                hr->is_young(), region_attr.get_type_str(), i);
          _failures = true;
          return true;
        }
        if (hr->is_old() != (region_attr.is_old())) {
          log_error(gc, verify)("## is_old %d / region attr type %s inconsistency for region %u",
                                hr->is_old(), region_attr.get_type_str(), i);
          _failures = true;
          return true;
        }
      }
    }

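    // All attribute checks passed for this region; returning false continues
    // the iteration with the next region.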
    return false;
  }

  bool failures() const { return _failures; }
};

bool G1HeapVerifier::check_region_attr_table() {
  G1CheckRegionAttrTableClosure cl;
  _g1h->_hrm->iterate(&cl);
  return !cl.failures();
}
#endif // PRODUCT