/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}
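
// Note that walk_mem_region_loop above deliberately stops before the
// object that reaches or crosses "top": its caller (see
// walk_mem_region_with_cl below) scans that final object itself, using
// the memRegion-bounded variant of oop_iterate, because the object may
// extend beyond the dirty card being processed.
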
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl;               break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt;      break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
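
// An illustrative summary of the three filter kinds (editorial note):
//   NoFilterKind          - cl sees every reference in the card.
//   IntoCSFilterKind      - cl only sees references that point into
//                           the collection set.
//   OutOfRegionFilterKind - cl only sees references that point outside
//                           [_hr->bottom(), _hr->end()); intra-region
//                           references never need remembered-set entries.
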
// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE     (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE     ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the average of the initial and
// maximum heap sizes).
#define TARGET_REGION_NUMBER          2048

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  if (region_size != G1HeapRegionSize) {
    // Update the flag to make sure that PrintFlagsFinal logs the correct value
    FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size);
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to size_t is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
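
// Worked example of the ergonomics above (hypothetical flag values):
// with -Xms256m -Xmx4g and G1HeapRegionSize left at its default,
//   average_heap_size = (256M + 4096M) / 2     = 2176M
//   region_size       = MAX2(2176M / 2048, 1M) = ~1.0625M
// which log2_long rounds down to the power of two 1M. That already lies
// within [MIN_REGION_SIZE, MAX_REGION_SIZE], so the heap is divided into
// 1M regions and G1HeapRegionSize is updated to match.
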
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
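
// For example (illustrative numbers only): a region with 900K of
// reclaimable space predicted to take 3ms to evacuate scores 300K/ms,
// and is preferred over a region with 1M reclaimable that is predicted
// to take 10ms (100K/ms) when old regions are ranked for a mixed
// collection.
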
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
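
// A sketch of the claim protocol above (editorial note). Parallel GC
// workers race to stamp a region with their task's claim value, e.g.:
//
//   if (hr->claimHeapRegion(claim_value)) {
//     // this worker alone processes hr for this task
//   }
//
// Atomic::cmpxchg returns the previous value of _claimed, so only the
// worker whose earlier read ("current") comes back unchanged has
// installed claimValue; every other worker gets false and skips the
// region, ensuring it is processed at most once per claim value.
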
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
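
// Worked example of the bias above (editorial note): suppose low == addr
// and the block start we want is high == addr + 1. An unbiased midpoint,
// low + diff/2 with diff == 1, equals low, so block_start_careful(middle)
// would stay below addr and "low = middle" would make no progress.
// Rounding the midpoint up instead yields middle == high, and the search
// terminates.
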
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  // We're not using an iterator given that it will wrap around when
  // it reaches the last region and this is not what we want here.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint index = hrs_index() + 1;
  while (index < g1h->n_regions()) {
    HeapRegion* hr = g1h->region_at(index);
    if (!hr->isHumongous()) {
      return hr;
    }
    index += 1;
  }
  return NULL;
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)       \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);               \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)

void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}
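
// Background for the two functions above (editorial note): PTAMS/NTAMS
// are the tops-at-mark-start of the "prev" and "next" marking bitmaps.
// An object below TAMS is live only if marked on the corresponding
// bitmap, while an object at or above TAMS is implicitly live because it
// was allocated after that marking cycle began. Setting NTAMS to
// bottom() therefore makes every object in the region implicitly live
// for the in-progress mark.
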
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}
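
// Why the storeload() in oops_on_card_seq_iterate_careful above matters
// (editorial note): the card is reset to clean *before* the objects it
// covers are read. A mutator that updates a field on this card
// concurrently will re-dirty the card after its store, so the update is
// picked up by a later refinement pass. Without the store-load fence,
// the reads could be reordered ahead of the cleaning store, and an
// update could be missed while the card still ends up clean.
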
// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::migrate_strong_code_roots() {
  assert(in_collection_set(), "only collection set regions");
  assert(!isHumongous(), "not humongous regions");

  HeapRegionRemSet* hrrs = rem_set();
  hrrs->migrate_strong_code_roots();
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}
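
// Editorial note on "strong code roots": these are nmethods whose
// embedded oops point into this region. Keeping them on a per-region
// list lets an evacuation pause treat just those nmethods as roots,
// rather than scanning the whole code cache for compiled code that
// references objects being moved.
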
class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  int strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // If this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  // An H-region should have an empty strong code root list
  if (isHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                             (_containing_obj->is_objArray() ?
                                cv_field == dirty
                              : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
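
// Editorial summary of the remembered-set check in VerifyLiveClosure
// above: a cross-region reference is acceptable if (a) it originates in
// a young region (young regions are always collected, so they need no
// remembered-set entries), (b) the destination region's remembered set
// already contains the field's card, or (c) update buffers were not
// flushed before verification, in which case a still-dirty card for the
// field (or for the object head, for non-arrays) accounts for the
// missing entry.
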
// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        if (!klass->is_metaspace_object()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
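
// Editorial note on the [top, end) BOT checks in verify() above: the
// block offset table must map every address in the unallocated tail of
// the region back to top, so the checks probe top, top + 1, a midpoint,
// and end - 1 as cheap samples of that whole range.
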
// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then it will
    // still see the stale time stamp and will pick up top() as the
    // high water mark of the region. If it does so after
    // _gc_time_stamp = ..., then it will pick up the right
    // saved_mark_word() as the high water mark of the region. Either
    // way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}