/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur), _hr)) {
    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop, _hr)) {
        // Bottom lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
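        // (Only the first and last object on the card may extend past mr
        // and therefore need the MemRegion-bounded oop_iterate; interior
        // objects are known to be fully contained and can be scanned whole.)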
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       (uintx) HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast is safe, given that we've bounded region_size by
  // HeapRegionBounds::min_size() and HeapRegionBounds::max_size().
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == orig_end(),
         "we should have already filtered out humongous regions");
  assert(!_in_collection_set,
         err_msg("Should not clear heap region %u in the collection set", hrm_index()));

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
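    // (For the parallel case, par_clear() below clears both the remembered
    // set and the card table entries covering this region.)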
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(end() == orig_end(),
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(end() == orig_end(),
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  if (is_starts_humongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(orig_end());
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == orig_end(), "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL),
    _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
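  // Each region owns its remembered set, which records locations outside
  // the region that reference into it and also carries the strong code
  // roots list manipulated further down in this file.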
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();

  assert(mr.end() == orig_end(),
         err_msg("Given region end address " PTR_FORMAT " should match exactly "
                 "bottom plus one region size, i.e. " PTR_FORMAT,
                 p2i(mr.end()), p2i(orig_end())));
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, marked_bytes, used()));
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    cur += block_size(cur);
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
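  // (Cards in young regions are rejected outright below and must then be
  // left dirty, which is why filter_young and card_ptr are coupled.)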
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");
  assert((cur + block_size(cur)) > start, "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this point in
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // If this region is empty then there should be no entries
  // on its strong code root list.
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "SIZE_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
                             "region but has "SIZE_FORMAT" code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());

  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->is_humongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                          (_containing_obj->is_objArray() ?
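                               // For object arrays the barrier dirties the
                               // card of the element written, so only the
                               // field's card is conclusive; for other
                               // objects a deferred mark (with
                               // ReduceInitialCardMarks) may dirty the
                               // object's head card instead.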
                               cv_field == dirty
                             : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->cr();
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_region_humongous != g1->is_humongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->is_humongous(obj_size) ? "" : "non-",
                             obj_size, is_region_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _offsets.verify();
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
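  // (The space in [top, end) forms a single unallocated block, so a BOT
  // look-up anywhere in that range, e.g. at end() - 1, must return top.)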
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_region_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.
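//
// Note on the time stamp protocol used below: record_timestamp() publishes
// the heap's current GC time stamp for this space. A concurrent reader in
// scan_top() treats a stale time stamp as "not allocated into during this
// GC" and may safely scan up to top(); otherwise it is limited to
// [bottom(), _scan_top).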

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return _scan_top;
  }
}

void G1OffsetTableContigSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1OffsetTableContigSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}

void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}