/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/liveRange.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes     = 0;
size_t HeapRegion::GrainWords     = 0;
size_t HeapRegion::CardsPerRegion = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
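// HeapRegionDCTOC::walk_mem_region() below scans the objects overlapping
// the dirty-card range [bottom, top) and applies the remembered-set scan
// closure to each live one. Its three-part structure falls out of one
// observation: only the first and the last object can straddle the
// boundaries of the card's MemRegion, so only those two are iterated with
// the MemRegion-bounded oop_iterate variant; every object strictly in
// between is fully covered and can use the cheaper unbounded oop_iterate().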
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur))) {
    oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop)) {
        // The object lies entirely below top, so we can call the
        // non-MemRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur))) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // region_size has been clamped to [HeapRegionBounds::min_size(),
  // HeapRegionBounds::max_size()] above, so this assignment is safe.
  GrainBytes = region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
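// A worked example of the sizing above (illustrative, made-up numbers,
// assuming the usual bounds of a 1MB minimum, a 32MB maximum and a target
// of roughly 2048 regions): for -Xms256m -Xmx4g the average heap size is
// (256MB + 4096MB) / 2 = 2176MB, and 2176MB / 2048 is about 1.06MB. That
// rounds down to the nearest power of two, 1MB, which already lies within
// the [min, max] bounds, so 1MB regions would be used.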
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
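// In other words, _gc_efficiency has units of bytes reclaimed per
// millisecond of predicted evacuation time. As an illustrative example
// (made-up numbers): a region with 6MB reclaimable predicted to take 3ms
// to evacuate scores ~2MB/ms and ranks ahead of a region with 7MB
// reclaimable predicted to take 14ms (~0.5MB/ms) when the policy picks
// candidates for mixed collections.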
void HeapRegion::set_starts_humongous(HeapWord* obj_top) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  _offsets.set_for_starts_humongous(obj_top);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _next_in_special_set(NULL),
    _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    cur += block_size(cur);
  }
  return NULL;
}
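// A note on "unparseable points" in the careful iterators above and below:
// a NULL result from klass_or_null() is taken to mean that another thread
// has allocated the object but has not yet published its klass pointer,
// so the walk cannot safely step past that address. Both iterators
// therefore return the address of the unparseable object so the caller
// can deal with the card later.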
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // When the loop above finishes, we have a parseable object that
  // begins on or before the start of the memory region, and either
  // ends inside the region or spans it entirely.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecisely at the object start.
      // We always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely
      // contained in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}
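// The verification closures below check the "strong code roots" attached
// to a region's remembered set. The idea, briefly: an nmethod that embeds
// oops pointing into a region is registered with that region, so that
// during evacuation the nmethod can be treated as a root and its embedded
// oops updated. Verification checks both directions of this association:
// every registered nmethod must be alive and must actually contain at
// least one oop into the region, and no such oop may point above the
// region's top.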
class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ") is above "
                                 "top " PTR_FORMAT,
                                 p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod "
                               PTR_FORMAT " in its strong code roots",
                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod "
                                 PTR_FORMAT " in its strong code roots "
                                 "with no pointers into region",
                                 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has other "
                                 "failures for nmethod " PTR_FORMAT,
                                 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};
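// verify_strong_code_roots() below drives the two closures above. Before
// walking any nmethods it handles two cases that can never legitimately
// own strong code root entries: an empty region, and a continuation of a
// humongous object (code root entries for a humongous object are kept on
// its "starts humongous" region). A non-empty list in either case is
// itself reported as a verification failure.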
void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is empty "
                             "but has " SIZE_FORMAT " code root entries",
                             p2i(bottom()), p2i(end()), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region " HR_FORMAT " is a continuation of a humongous "
                             "region but has " SIZE_FORMAT " code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());
  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS " PTR_FORMAT " NTAMS " PTR_FORMAT,
            p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
  G1OffsetTableContigSpace::print_on(st);
}
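// VerifyLiveClosure is applied to every reference field of every object
// that HeapRegion::verify() below considers live. It makes two separate
// checks: first, that the referent is inside the heap and live under the
// chosen VerifyOption; second, for references that cross regions, that
// the target region's remembered set contains an entry for the referring
// field's card (with the exceptions spelled out in the code, e.g.
// references out of young regions).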
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo)
  { }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = k->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field " PTR_FORMAT
                                 " of live obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 p2i(p), p2i(_containing_obj),
                                 p2i(from->bottom()), p2i(from->end()));
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj " PTR_FORMAT " not in the heap",
                                 p2i(obj));
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field " PTR_FORMAT
                                 " of live obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 p2i(p), p2i(_containing_obj),
                                 p2i(from->bottom()), p2i(from->end()));
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 p2i(obj), p2i(to->bottom()), p2i(to->end()));
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->is_pinned()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                               (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                                : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->cr();
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field " PTR_FORMAT " "
                                   "of obj " PTR_FORMAT ", "
                                   "in region " HR_FORMAT,
                                   p2i(p), p2i(_containing_obj),
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj " PTR_FORMAT " "
                                   "in region " HR_FORMAT,
                                   p2i(obj),
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_region_humongous != g1->is_humongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have a bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj " PTR_FORMAT " is of %shumongous size ("
                             SIZE_FORMAT " words) in a %shumongous region",
                             p2i(p), g1->is_humongous(obj_size) ? "" : "non-",
                             obj_size, is_region_humongous ? "" : "non-");
      *failures = true;
      return;
    }
"" : "non-"); 765 *failures = true; 766 return; 767 } 768 769 if (!g1->is_obj_dead_cond(obj, this, vo)) { 770 if (obj->is_oop()) { 771 Klass* klass = obj->klass(); 772 bool is_metaspace_object = Metaspace::contains(klass) || 773 (vo == VerifyOption_G1UsePrevMarking && 774 ClassLoaderDataGraph::unload_list_contains(klass)); 775 if (!is_metaspace_object) { 776 gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " " 777 "not metadata", p2i(klass), p2i(obj)); 778 *failures = true; 779 return; 780 } else if (!klass->is_klass()) { 781 gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " " 782 "not a klass", p2i(klass), p2i(obj)); 783 *failures = true; 784 return; 785 } else { 786 vl_cl.set_containing_obj(obj); 787 obj->oop_iterate_no_header(&vl_cl); 788 if (vl_cl.failures()) { 789 *failures = true; 790 } 791 if (G1MaxVerifyFailures >= 0 && 792 vl_cl.n_failures() >= G1MaxVerifyFailures) { 793 return; 794 } 795 } 796 } else { 797 gclog_or_tty->print_cr(PTR_FORMAT " no an oop", p2i(obj)); 798 *failures = true; 799 return; 800 } 801 } 802 prev_p = p; 803 p += obj_size; 804 } 805 806 if (!is_young() && !is_empty()) { 807 _offsets.verify(); 808 } 809 810 if (is_region_humongous) { 811 oop obj = oop(this->humongous_start_region()->bottom()); 812 if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) { 813 gclog_or_tty->print_cr("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj)); 814 } 815 } 816 817 if (!is_region_humongous && p != top()) { 818 gclog_or_tty->print_cr("end of last object " PTR_FORMAT " " 819 "does not match top " PTR_FORMAT, p2i(p), p2i(top())); 820 *failures = true; 821 return; 822 } 823 824 HeapWord* the_end = end(); 825 // Do some extra BOT consistency checking for addresses in the 826 // range [top, end). BOT look-ups in this range should yield 827 // top. No point in doing that if top == end (there's nothing there). 
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: " PTR_FORMAT " "
                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                               p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: " PTR_FORMAT " "
                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                               p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  if (is_region_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is humongous "
                           "but has " SIZE_FORMAT " objects",
                           p2i(bottom()), p2i(end()), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  assert(new_end == _bottom + HeapRegion::GrainWords,
         "set_end should only ever be set to _bottom + HeapRegion::GrainWords");
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

#ifndef PRODUCT
void G1OffsetTableContigSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1OffsetTableContigSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
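// A brief sketch of the time stamp protocol used below: when a region is
// allocated into during a GC, record_timestamp() stamps it with the heap's
// current GC time stamp. Concurrent readers calling scan_top() compare
// stamps: a region with an older stamp has not been allocated into during
// the current GC and can be scanned up to top(), while a region with the
// current stamp must only be scanned up to _scan_top, which
// record_retained_region() keeps up to date for the retained old alloc
// region. The loadload ordering in scan_top() keeps the read of top() from
// being reordered past the read of the time stamp.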
HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return _scan_top;
  }
}

void G1OffsetTableContigSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1OffsetTableContigSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}

void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}