/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

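// Applies cl to every live object in [cur, top) except the last one: the
// loop stops once the next object would start at or beyond top, and returns
// the address of that last object unprocessed. The caller (walk_mem_region
// below) handles that final object separately, since it may extend beyond
// the area being walked and so needs the MemRegion-bounded oop_iterate.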
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  size_t oop_size = hr->block_size(cur);
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = hr->block_size(cur);
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  ExtendedOopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = _cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = _hr->block_size(bottom);
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, _cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, _cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

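// Worked example of the sizing below, assuming the usual HeapRegionBounds
// values (1M minimum, 32M maximum, a target of 2048 regions): with a 2G
// initial and 6G maximum heap, the average is 4G and 4G / 2048 = 2M, which
// is already a power of two and within bounds, so GrainBytes becomes 2M.
// A 128M heap would instead be clamped to the 1M minimum region size.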
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       (uintx) HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to size_t is safe, given that we've bounded region_size by
  // HeapRegionBounds::min_size() and HeapRegionBounds::max_size().
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

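// Illustrative numbers for the ratio computed below: a region with 6M of
// reclaimable space predicted to take 3ms to evacuate scores 2M/ms, making
// it twice as attractive as a region scoring 1M/ms when old regions are
// ranked for a mixed collection.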
void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

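// Claiming is a single CAS race on _claimed: the worker that swings it from
// its current value to claimValue wins and gets true; a thread that loses
// the race, or that finds the region already claimed with claimValue, gets
// false. A sketch of the intended usage (hypothetical caller):
//
//   if (hr->claimHeapRegion(claim_value)) {
//     ... // this worker, and only this worker, processes hr
//   }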
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  _orig_end = mr.end();
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_top_and_timestamp();
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += block_size(cur);
  }
  return NULL;
}

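// Scans the objects overlapping a single card, returning NULL when every
// object on the card was processed (or the card was filtered out), and the
// address of the first unparseable block otherwise, so a caller can tell a
// cleanly scanned card from one that has to be deferred and retried.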
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");
  assert((cur + block_size(cur)) > start, "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region.
        // Check that it is below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "SIZE_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (continuesHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
                             "region but has "SIZE_FORMAT" code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());
  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                                : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->cr();
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify &&
        !g1->is_obj_dead(obj, this) &&
        !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark_word(bottom());
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(_gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  HeapWord* local_top = top();
  OrderAccess::loadload();
  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return Space::saved_mark_word();
  }
}

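// Writer side of the time stamp protocol read by saved_mark_word() above:
// the saved mark must become visible before the new time stamp does, hence
// the storestore barrier between the two stores below. A reader that still
// sees the old time stamp simply answers top(), which is also correct.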
void G1OffsetTableContigSpace::record_top_and_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then it will
    // still see the old time stamp and will pick up top() as the high
    // water mark of the region. If it does so after _gc_time_stamp = ...,
    // then it will pick up the right saved_mark_word() as the high
    // water mark of the region. Either way, the behaviour will be correct.
    Space::set_saved_mark_word(top());
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
}
#undef block_is_always_obj

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  reset_bot();
}