/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionTracer.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur))) {
    oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

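  // Objects in the interior of the walk are known to lie entirely below
  // top, so they can be iterated without MemRegion clipping; only the
  // first and the last object can straddle the card's boundaries.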
  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop)) {
        // The object lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur))) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

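// If G1HeapRegionSize was not set on the command line, pick a region size
// that splits the average of the initial and maximum heap sizes into roughly
// HeapRegionBounds::target_number() regions, rounded down to a power of two
// and clamped to [min_size, max_size].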
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = region_size;
  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;

  if (G1HeapRegionSize != GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
  }
}

void HeapRegion::reset_after_compaction() {
  G1ContiguousSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
  }
  zero_marked_bytes();

  init_top_at_mark_start();
  _gc_time_stamp = G1CollectedHeap::heap()->get_gc_time_stamp();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}

void HeapRegion::set_archive() {
  report_region_type_change(G1HeapRegionTraceType::Archive);
  _type.set_archive();
}

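// A humongous object spans one "starts humongous" region followed by zero or
// more "continues humongous" regions; every region in the series keeps a
// pointer back to the starting region.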
void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  _bot_part.set_for_starts_humongous(obj_top, fill_size);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetTable* bot,
                       MemRegion mr) :
    G1ContiguousSpace(bot),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _next_in_special_set(NULL),
    _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(bot, this);

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}

void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used(),
                                            (uint)allocation_context());
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    cur += block_size(cur);
  }
  return NULL;
}

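// Apply cl to the oops on the given card's portion of this region, walking
// objects "carefully" since the region may contain unparseable areas.
// Returns NULL on success, or the address at which an unparseable point was
// encountered.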
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only be asked to clean the card (card_ptr != NULL)
  // when filter_young is true, and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we can
  // safely tell whether this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

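  // Cleaning the card before scanning means that any reference store we
  // miss below must have happened after the clean; the mutator's write
  // barrier will then re-dirty the card, so the card gets reprocessed
  // rather than the update being lost.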
  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop, we have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely
      // contained in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

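// Verification of the strong code root list: every nmethod attached to this
// region must be alive and must actually contain at least one oop pointing
// into the region.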
class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          log_error(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
                                p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
                            p2i(bottom()), p2i(end()), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("|%4u", this->_hrm_index);
  st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|%3d%%", (int) ((double) used() * 100 / capacity()));
  st->print("|%2s", get_short_type_str());
  if (in_collection_set()) {
    st->print("|CS");
  } else {
    st->print("|  ");
  }
  st->print("|TS%3u", _gc_time_stamp);
  st->print("|AC%3u", allocation_context());
  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "|",
               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
}

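// Closures shared by full-region verification: VerifyLiveClosure checks that
// live objects only reference objects that are in the heap and themselves
// live, while VerifyRemSetClosure checks that cross-region references are
// covered by a remembered set entry (or a still-dirty, unrefined card).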
class G1VerificationClosure : public OopClosure {
protected:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = k->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }
};

class VerifyLiveClosure : public G1VerificationClosure {
public:
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_liveness(p);
  }

  template <class T>
  void verify_liveness(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    Log(gc, verify) log;
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          log.error("----------");
        }
        ResourceMark rm;
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          print_object(log.error_stream(), _containing_obj);
          log.error("points to obj " PTR_FORMAT " not in the heap", p2i(obj));
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          print_object(log.error_stream(), _containing_obj);
          log.error("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(obj), p2i(to->bottom()), p2i(to->end()));
          print_object(log.error_stream(), obj);
        }
        log.error("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }
    }
  }
};

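// Invariant being verified below: a reference into a different region must
// be recorded in the referenced region's remembered set, unless the
// referencing region is young (young regions are always evacuated, so their
// outgoing references need no entries), the referenced region is pinned, or
// the covering card is still dirty and thus not yet refined.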
class VerifyRemSetClosure : public G1VerificationClosure {
public:
  VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_remembered_set(p);
  }

  template <class T>
  void verify_remembered_set(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    Log(gc, verify) log;
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->is_pinned()) {
        jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
        jbyte cv_field = *_bs->byte_for_const(p);
        const jbyte dirty = CardTableModRefBS::dirty_card_val();

        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                           (_containing_obj->is_objArray() ?
                              cv_field == dirty :
                              cv_obj == dirty || cv_field == dirty));
        if (is_bad) {
          MutexLockerEx x(ParGCRareEvent_lock,
                          Mutex::_no_safepoint_check_flag);

          if (!_failures) {
            log.error("----------");
          }
          log.error("Missing rem set entry:");
          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          ResourceMark rm;
          _containing_obj->print_on(log.error_stream());
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
          if (obj->is_oop()) {
            obj->print_on(log.error_stream());
          }
          log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
          log.error("----------");
          _failures = true;
          if (!failed) _n_failures++;
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

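// Walk every object below top, verifying its klass pointer and, depending on
// the collector state, its outgoing references and remembered set entries;
// afterwards, cross-check the BOT for addresses in [top, end), all of which
// must map back to top.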
void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  VerifyRemSetClosure vr_cl(g1, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not metadata", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not a klass", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate_no_header(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate_no_header(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _bot_part.verify();
  }

  if (is_region_humongous) {
    oop obj = oop(this->humongous_start_region()->bottom());
    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
      log_error(gc, verify)("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
      *failures = true;
      return;
    }
  }

  if (!is_region_humongous && p != top()) {
    log_error(gc, verify)("end of last object " PTR_FORMAT " "
                          "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
    if (b_start_1 != p) {
      log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
      if (b_start_2 != p) {
        log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
      if (b_start_3 != p) {
        log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
    if (b_start_4 != p) {
      log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyRemSetClosure vr_cl(g1, vo);
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        vr_cl.set_containing_obj(obj);
        obj->oop_iterate_no_header(&vr_cl);

        if (vr_cl.failures()) {
          *failures = true;
        }
        if (G1MaxVerifyFailures >= 0 &&
            vr_cl.n_failures() >= G1MaxVerifyFailures) {
          return;
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }

    prev_p = p;
    p += obj_size;
  }
}

void HeapRegion::verify_rem_set() const {
  bool failures = false;
  verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
  guarantee(!failures, "HeapRegion RemSet verification failed");
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

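// The _gc_time_stamp / _scan_top pair below lets concurrent readers decide
// how much of a region is safe to scan: if the region's time stamp is older
// than the heap's, the region has not been allocated into during the current
// GC and top() itself is safe; otherwise readers must stop at _scan_top,
// which is bottom() for all regions except the retained old alloc region.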
// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1ContiguousSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1ContiguousSpace::print() const {
  print_short();
  tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end()));
}

HeapWord* G1ContiguousSpace::initialize_threshold() {
  return _bot_part.initialize_threshold();
}

HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start,
                                             HeapWord* end) {
  _bot_part.alloc_block(start, end);
  return _bot_part.threshold();
}

HeapWord* G1ContiguousSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return _scan_top;
  }
}

void G1ContiguousSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1ContiguousSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}

void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1ContiguousSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
  _bot_part(bot, this),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
}

void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}