/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set.
  // If the object is not considered dead, either because it is marked (in
  // the mark bitmap) or it was allocated after marking finished, then we
  // add it. Otherwise we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE  (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER  2048

void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
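  // Example (illustrative only): with a 2g minimum heap and G1HeapRegionSize
  // left at its default, the calculation above yields 2g / 2048 = 1m, which
  // is already a power of two and within [MIN_REGION_SIZE, MAX_REGION_SIZE],
  // so the globals below describe 1m regions.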
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount.  Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high".  This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
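    // (For instance, if low == high - 1, then diff == 1 and (diff+1)/2 == 1,
    // so middle lands on high and the loop can terminate; without the +1,
    // middle would stay at low and we might fail to make progress.)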
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  // We're not using an iterator given that it will wrap around when
  // it reaches the last region and this is not what we want here.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint index = hrs_index() + 1;
  while (index < g1h->n_regions()) {
    HeapRegion* hr = g1h->region_at(index);
    if (!hr->isHumongous()) {
      return hr;
    }
    index += 1;
  }
  return NULL;
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)


void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap.
    // So all objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
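    // (The StoreLoad barrier orders the card-clean store above with the heap
    // reads below; presumably any reference a mutator stores after the clean
    // will re-dirty the card, so it is re-examined later rather than missed.)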
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::migrate_strong_code_roots() {
  assert(in_collection_set(), "only collection set regions");
  assert(!isHumongous(), "not humongous regions");

  HeapRegionRemSet* hrrs = rem_set();
  hrrs->migrate_strong_code_roots();
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region.
        // Check that it's less than top
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  int strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  // An H-region should have an empty strong code root list
  if (isHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                             (_containing_obj->is_objArray() ?
                                cv_field == dirty
                              : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        if (!klass->is_metadata()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}