/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/liveRange.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur), _hr)) {
    oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop, _hr)) {
        // Bottom lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}
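// Editorial summary of the loop structure above (derived from the code, not
// normative): the first object may start before mr.start() and the last one
// may extend past mr.end(), so both are iterated with the MemRegion-bounded
// oop_iterate(closure, mr); every interior object is known to lie entirely
// below top, so the cheaper unbounded oop_iterate(closure) suffices there.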
size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // region_size is bounded below and above by HeapRegionBounds::min_size()
  // and HeapRegionBounds::max_size(), so this assignment is safe.
  GrainBytes = region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
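// Worked example (illustrative only; assumes the usual HeapRegionBounds
// values of a 1M minimum, a 32M maximum and a target of 2048 regions):
// with -Xms2g -Xmx8g and G1HeapRegionSize left at its default,
//   average_heap_size = (2G + 8G) / 2       = 5G
//   region_size       = MAX2(5G / 2048, 1M) = 2.5M
//   largest power of 2 that is <= 2.5M      = 2M
// so LogOfHRGrainBytes becomes 21 and GrainBytes 2M.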
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == orig_end(),
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(end() == orig_end(),
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(end() == orig_end(),
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  if (is_starts_humongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(orig_end());
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == orig_end(), "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}
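// Illustrative example (editorial, with made-up numbers): with 1M regions,
// an object of roughly 5M spans several consecutive regions. The first is
// tagged "starts humongous" and has its end() extended to cover the whole
// object; the regions after it are tagged "continues humongous" and point
// back to the first via _humongous_start_region. clear_humongous() above
// undoes this by restoring the original end() and clearing the back-pointer.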
HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _next_in_special_set(NULL),
    _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();

  assert(mr.end() == orig_end(),
         "Given region end address " PTR_FORMAT " should match exactly "
         "bottom plus one region size, i.e. " PTR_FORMAT,
         p2i(mr.end()), p2i(orig_end()));
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    cur += block_size(cur);
  }
  return NULL;
}
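// A note on "unparseable point" (editorial summary): an allocating thread
// publishes an object's klass pointer last, so a concurrent reader that
// observes klass_or_null() == NULL has found an object that is still being
// initialized. Its size cannot be determined yet, so iteration stops there
// and the address is returned to the caller for a later retry.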
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we are expected to clean the card (card_ptr != NULL)
  // exactly when filter_young is true, and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop, we have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely
      // contained in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}
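// Why the card is cleaned before scanning (editorial summary): writing
// clean_card_val() to *card_ptr and then issuing the storeload() fence
// above guarantees that any mutator store racing with this scan re-dirties
// the card after we cleaned it, so the update is re-examined on a later
// refinement pass rather than silently missed.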
// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          log_info(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
                               p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};
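// Editorial note on what the verification closures check: an nmethod is
// recorded as a strong code root of a region when it embeds an oop pointing
// into that region. The blob closure below checks that every registered
// nmethod is still alive and really has at least one oop in the region; the
// oop closure above additionally flags any such oop lying above top().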
class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
                             p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
                           p2i(bottom()), p2i(end()), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      log_info(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
                           HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());

  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS " PTR_FORMAT " NTAMS " PTR_FORMAT,
            p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
  G1OffsetTableContigSpace::print_on(st);
}
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo)
  { }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = k->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    LogHandle(gc, verify) log;
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          log.info("----------");
        }
        ResourceMark rm;
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          log.info("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                   p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          print_object(log.info_stream(), _containing_obj);
          log.info("points to obj " PTR_FORMAT " not in the heap", p2i(obj));
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          log.info("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                   p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          print_object(log.info_stream(), _containing_obj);
          log.info("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                   p2i(obj), p2i(to->bottom()), p2i(to->end()));
          print_object(log.info_stream(), obj);
        }
        log.info("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->is_pinned()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                                : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              log.info("----------");
            }
            log.info("Missing rem set entry:");
            log.info("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
                     p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
            ResourceMark rm;
            _containing_obj->print_on(log.info_stream());
            log.info("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
            obj->print_on(log.info_stream());
            log.info("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
            log.info("----------");
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
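// Editorial summary of the rem set check above: a cross-region reference is
// flagged as missing only when none of the following hold: the source region
// is young (young regions are always fully scanned), the destination
// region's rem set already contains the card of the field, or update buffers
// were deliberately left unflushed before verification and the relevant card
// is still dirty (it would be processed later anyway). For objArrays only
// the field's own card counts, because their cards are dirtied precisely per
// element range rather than imprecisely at the object header.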
// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_region_humongous != g1->is_humongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      log_info(gc, verify)("obj " PTR_FORMAT " is of %shumongous size ("
                           SIZE_FORMAT " words) in a %shumongous region",
                           p2i(p), g1->is_humongous(obj_size) ? "" : "non-",
                           obj_size, is_region_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                   ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          log_info(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                               "not metadata", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          log_info(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                               "not a klass", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        log_info(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _offsets.verify();
  }

  if (p != top()) {
    log_info(gc, verify)("end of last object " PTR_FORMAT " "
                         "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      log_info(gc, verify)("BOT look up for top: " PTR_FORMAT " "
                           " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                           p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        log_info(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        log_info(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      log_info(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
                           " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                           p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  if (is_region_humongous && object_num > 1) {
    log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is humongous "
                         "but has " SIZE_FORMAT " objects",
                         p2i(bottom()), p2i(end()), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}
void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

#ifndef PRODUCT
void G1OffsetTableContigSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1OffsetTableContigSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1OffsetTableContigSpace::print() const {
  print_short();
  tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return _scan_top;
  }
}

void G1OffsetTableContigSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1OffsetTableContigSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}

void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}
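// Editorial note on the time stamp protocol used by scan_top() and
// record_timestamp() above: while a region's _gc_time_stamp equals the
// heap's current stamp (i.e. record_timestamp() has run for the ongoing GC),
// concurrent readers must bound their scans by _scan_top, which is bottom()
// for freshly cleared regions and top() only for the retained old alloc
// region published via record_retained_region(). Once the heap's stamp moves
// past the region's, top() is stable again and scan_top() returns it
// directly.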