/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE  (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the average of the initial and
// maximum heap sizes).
#define TARGET_REGION_NUMBER          2048

size_t HeapRegion::max_region_size() {
  return (size_t)MAX_REGION_SIZE;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
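  // For example (illustrative numbers only): with a 4G initial and maximum
  // heap, average_heap_size / TARGET_REGION_NUMBER is 2M, which is already
  // a power of two and falls inside [MIN_REGION_SIZE, MAX_REGION_SIZE], so
  // the clamping below leaves it untouched. Only very small or very large
  // heaps are affected by the bounds.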
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
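  // The resulting ratio (reclaimable bytes per ms of predicted evacuation
  // time) is used to rank old regions when choosing collection set
  // candidates: higher efficiency means more space reclaimed per unit of
  // pause time.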
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
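    // Rounding the midpoint up also guarantees progress: once the range
    // shrinks to a single word (diff == 1), middle equals high and the
    // loop terminates via the check below.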
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  // We're not using an iterator given that it will wrap around when
  // it reaches the last region and this is not what we want here.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint index = hrs_index() + 1;
  while (index < g1h->n_regions()) {
    HeapRegion* hr = g1h->region_at(index);
    if (!hr->isHumongous()) {
      return hr;
    }
    index += 1;
  }
  return NULL;
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)

void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
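  // Cleaning the card before we scan it means that any concurrent update
  // to an object on this card will redirty the card and cause it to be
  // re-examined later, so no update can be missed.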
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::migrate_strong_code_roots() {
  assert(in_collection_set(), "only collection set regions");
  assert(!isHumongous(),
         err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
                 HR_FORMAT_PARAMS(this)));

  HeapRegionRemSet* hrrs = rem_set();
  hrrs->migrate_strong_code_roots();
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's less than top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures()       { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "SIZE_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (continuesHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
                             "region but has "SIZE_FORMAT" code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                             (_containing_obj->is_objArray() ?
                                cv_field == dirty
                              : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->cr();
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        if (!klass->is_metaspace_object()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
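  // The probes below cover the interesting points of that range: top
  // itself, top + 1, an address roughly halfway between top and end, and
  // end - 1. Each of them should map back to top.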
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.
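// The G1-specific pieces below are the per-region view of the shared block
// offset table (_offsets), which has to be reset and resized alongside the
// space, and the GC time stamp protocol that lets saved_mark_word() return
// a consistent high-water mark to concurrent readers.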

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behavior will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}