/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur), _hr)) {
    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop, _hr)) {
        // Bottom lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       (uintx) HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
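
// Worked example (added commentary, not original code): the concrete numbers
// below assume the usual HeapRegionBounds defaults of roughly 2048 target
// regions and 1M/32M min/max region sizes, plus 512-byte cards (card_shift 9).
//
//   -Xms2g -Xmx8g            => average_heap_size = 5G
//   5G / 2048 target regions => 2.5M, which log2_long() rounds down to 2M
//   2M is within [1M, 32M]   => GrainBytes = 2M, GrainWords = 2M >> LogHeapWordSize
//   CardsPerRegion           => 2M >> 9 = 4096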

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
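
// Illustrative sketch (added commentary; SomeTaskClaimValue and process() are
// hypothetical names): parallel GC workers race on claimHeapRegion() so that
// each region is handled by at most one worker per task.
//
//   if (hr->claimHeapRegion(SomeTaskClaimValue)) {
//     process(hr);   // only the thread whose cmpxchg installed the value gets here
//   }
//
// A losing thread either observes _claimed already equal to the claim value
// or fails the cmpxchg because _claimed changed underneath it; both paths
// return false, so no region is processed twice for the same claim value.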

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  _orig_end = mr.end();
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: " SIZE_FORMAT " used: " SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += block_size(cur);
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop... We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely contained
      // in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}
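
// Illustrative note (added commentary; the caller below is an assumption, not
// a quote of the real refinement code): a non-NULL result is the address at
// which iteration hit an unparseable block (an object whose klass has not
// been published yet), so a caller refining a dirty card would typically
// treat the card as unprocessed and revisit it later.
//
//   HeapWord* stop_point =
//     hr->oops_on_card_seq_iterate_careful(dirty_region, &filter_cl,
//                                          filter_young, card_ptr);
//   if (stop_point != NULL) {
//     // defer the card, e.g. re-dirty it and enqueue it again (hypothetical)
//   }
//
// A NULL result means every live object intersecting the card was iterated.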

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's less than top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ") is above "
                                 "top " PTR_FORMAT,
                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod "
                               PTR_FORMAT " in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod "
                                 PTR_FORMAT " in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has other "
                                 "failures for nmethod " PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is empty "
                             "but has " SIZE_FORMAT " code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (continuesHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region " HR_FORMAT " is a continuation of a humongous "
                             "region but has " SIZE_FORMAT " code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());
  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS " PTR_FORMAT " NTAMS " PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

class G1VerificationClosure : public OopClosure {
protected:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }
};

class VerifyLiveClosure : public G1VerificationClosure {
public:
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_liveness(p);
  }

  template <class T>
  void verify_liveness(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field " PTR_FORMAT
                                 " of live obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj " PTR_FORMAT " not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field " PTR_FORMAT
                                 " of live obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }
    }
  }
};

class VerifyRemSetClosure : public G1VerificationClosure {
public:
  VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_remembered_set(p);
  }

  template <class T>
  void verify_remembered_set(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to   = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->isHumongous()) {
        jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
        jbyte cv_field = *_bs->byte_for_const(p);
        const jbyte dirty = CardTableModRefBS::dirty_card_val();

        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                        (_containing_obj->is_objArray() ?
                         cv_field == dirty
                         : cv_obj == dirty || cv_field == dirty));
        if (is_bad) {
          MutexLockerEx x(ParGCRareEvent_lock,
                          Mutex::_no_safepoint_check_flag);

          if (!_failures) {
            gclog_or_tty->cr();
            gclog_or_tty->print_cr("----------");
          }
          gclog_or_tty->print_cr("Missing rem set entry:");
          gclog_or_tty->print_cr("Field " PTR_FORMAT " "
                                 "of obj " PTR_FORMAT ", "
                                 "in region " HR_FORMAT,
                                 p, (void*) _containing_obj,
                                 HR_FORMAT_PARAMS(from));
          _containing_obj->print_on(gclog_or_tty);
          gclog_or_tty->print_cr("points to obj " PTR_FORMAT " "
                                 "in region " HR_FORMAT,
                                 (void*) obj,
                                 HR_FORMAT_PARAMS(to));
          if (obj->is_oop()) {
            obj->print_on(gclog_or_tty);
          }
          gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                 cv_obj, cv_field);
          gclog_or_tty->print_cr("----------");
          gclog_or_tty->flush();
          _failures = true;
          if (!failed) _n_failures++;
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  VerifyRemSetClosure vr_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj " PTR_FORMAT " is of %shumongous size ("
                             SIZE_FORMAT " words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify &&
        !g1->is_obj_dead(obj, this) &&
        !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1->full_collection() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate_no_header(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate_no_header(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT " not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
                           "does not match top " PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: " PTR_FORMAT " "
                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: " PTR_FORMAT " "
                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is humongous "
                           "but has " SIZE_FORMAT " objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyRemSetClosure vr_cl(g1, vo);
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        vr_cl.set_containing_obj(obj);
        obj->oop_iterate_no_header(&vr_cl);

        if (vr_cl.failures()) {
          *failures = true;
        }
        if (G1MaxVerifyFailures >= 0 &&
            vr_cl.n_failures() >= G1MaxVerifyFailures) {
          return;
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }

    prev_p = p;
    p += obj_size;
  }
}

void HeapRegion::verify_rem_set() const {
  bool failures = false;
  verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
  guarantee(!failures, "HeapRegion RemSet verification failed");
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return _scan_top;
  }
}

void G1OffsetTableContigSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1OffsetTableContigSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}

void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
}
#undef block_is_always_obj

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}
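
// Illustrative summary (added commentary, not original code) of the
// _gc_time_stamp / scan_top() protocol implemented above:
//
//  - While a region's _gc_time_stamp is behind the global time stamp, the
//    region has not been handed out as an allocation region in the current
//    GC, so scan_top() returns top(): everything below top() is parseable.
//  - record_timestamp() brings the region up to the current global stamp,
//    which switches scan_top() over to _scan_top; for a freshly handed-out
//    GC allocation region _scan_top is bottom(), so concurrent readers never
//    walk into a possibly unparseable GC LAB.
//  - record_retained_region() raises _scan_top to top(), so the retained old
//    allocation region can still be scanned up to the objects that were
//    already allocated in it.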