/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"

int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE  ( 1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER  2048

size_t HeapRegion::max_region_size() {
  return (size_t)MAX_REGION_SIZE;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to size_t is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
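    // For example, if "low" == "high" - 1, then diff is 1; without the +1
    // bias the midpoint would be "low" itself and, when its block start is
    // below addr, the loop would make no progress. With the bias the
    // midpoint is "high" and we either return it or narrow the range.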
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  // We're not using an iterator given that it will wrap around when
  // it reaches the last region and this is not what we want here.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint index = hrs_index() + 1;
  while (index < g1h->n_regions()) {
    HeapRegion* hr = g1h->region_at(index);
    if (!hr->isHumongous()) {
      return hr;
    }
    index += 1;
  }
  return NULL;
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)


void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
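    // A StoreLoad barrier is used because it is the barrier that orders a
    // preceding store against subsequent loads; otherwise the clean-card
    // store above could be reordered with the heap reads below and a
    // concurrent update to this card's memory could be missed.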
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::migrate_strong_code_roots() {
  assert(in_collection_set(), "only collection set regions");
  assert(!isHumongous(), "not humongous regions");

  HeapRegionRemSet* hrrs = rem_set();
  hrrs->migrate_strong_code_roots();
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's less than top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  int strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  // An H-region should have an empty strong code root list
  if (isHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                                  : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        if (!klass->is_metaspace_object()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
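  // The probes below cover top, top + 1, a point roughly halfway between
  // top and end, and end - 1; each BOT look-up is expected to yield top.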
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}