1 /* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "code/nmethod.hpp" 27 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 28 #include "gc/g1/g1CollectedHeap.inline.hpp" 29 #include "gc/g1/g1HeapRegionTraceType.hpp" 30 #include "gc/g1/g1OopClosures.inline.hpp" 31 #include "gc/g1/heapRegion.inline.hpp" 32 #include "gc/g1/heapRegionBounds.inline.hpp" 33 #include "gc/g1/heapRegionManager.inline.hpp" 34 #include "gc/g1/heapRegionRemSet.hpp" 35 #include "gc/g1/heapRegionTracer.hpp" 36 #include "gc/shared/genOopClosures.inline.hpp" 37 #include "gc/shared/space.inline.hpp" 38 #include "logging/log.hpp" 39 #include "memory/iterator.hpp" 40 #include "memory/resourceArea.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "runtime/atomic.hpp" 43 #include "runtime/orderAccess.inline.hpp" 44 45 int HeapRegion::LogOfHRGrainBytes = 0; 46 int HeapRegion::LogOfHRGrainWords = 0; 47 size_t HeapRegion::GrainBytes = 0; 48 size_t HeapRegion::GrainWords = 0; 49 size_t HeapRegion::CardsPerRegion = 0; 50 51 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1, 52 HeapRegion* hr, 53 G1ParPushHeapRSClosure* cl, 54 CardTableModRefBS::PrecisionStyle precision) : 55 DirtyCardToOopClosure(hr, cl, precision, NULL), 56 _hr(hr), _rs_scan(cl), _g1(g1) { } 57 58 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, 59 OopClosure* oc) : 60 _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { } 61 62 void HeapRegionDCTOC::walk_mem_region(MemRegion mr, 63 HeapWord* bottom, 64 HeapWord* top) { 65 G1CollectedHeap* g1h = _g1; 66 size_t oop_size; 67 HeapWord* cur = bottom; 68 69 // Start filtering what we add to the remembered set. If the object is 70 // not considered dead, either because it is marked (in the mark bitmap) 71 // or it was allocated after marking finished, then we add it. Otherwise 72 // we can safely ignore the object. 73 if (!g1h->is_obj_dead(oop(cur))) { 74 oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr); 75 } else { 76 oop_size = _hr->block_size(cur); 77 } 78 79 cur += oop_size; 80 81 if (cur < top) { 82 oop cur_oop = oop(cur); 83 oop_size = _hr->block_size(cur); 84 HeapWord* next_obj = cur + oop_size; 85 while (next_obj < top) { 86 // Keep filtering the remembered set. 87 if (!g1h->is_obj_dead(cur_oop)) { 88 // Bottom lies entirely below top, so we can call the 89 // non-memRegion version of oop_iterate below. 90 cur_oop->oop_iterate(_rs_scan); 91 } 92 cur = next_obj; 93 cur_oop = oop(cur); 94 oop_size = _hr->block_size(cur); 95 next_obj = cur + oop_size; 96 } 97 98 // Last object. Need to do dead-obj filtering here too. 99 if (!g1h->is_obj_dead(oop(cur))) { 100 oop(cur)->oop_iterate(_rs_scan, mr); 101 } 102 } 103 } 104 105 size_t HeapRegion::max_region_size() { 106 return HeapRegionBounds::max_size(); 107 } 108 109 size_t HeapRegion::min_region_size_in_words() { 110 return HeapRegionBounds::min_size() >> LogHeapWordSize; 111 } 112 113 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) { 114 size_t region_size = G1HeapRegionSize; 115 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) { 116 size_t average_heap_size = (initial_heap_size + max_heap_size) / 2; 117 region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(), 118 HeapRegionBounds::min_size()); 119 } 120 121 int region_size_log = log2_long((jlong) region_size); 122 // Recalculate the region size to make sure it's a power of 123 // 2. This means that region_size is the largest power of 2 that's 124 // <= what we've calculated so far. 125 region_size = ((size_t)1 << region_size_log); 126 127 // Now make sure that we don't go over or under our limits. 128 if (region_size < HeapRegionBounds::min_size()) { 129 region_size = HeapRegionBounds::min_size(); 130 } else if (region_size > HeapRegionBounds::max_size()) { 131 region_size = HeapRegionBounds::max_size(); 132 } 133 134 // And recalculate the log. 135 region_size_log = log2_long((jlong) region_size); 136 137 // Now, set up the globals. 138 guarantee(LogOfHRGrainBytes == 0, "we should only set it once"); 139 LogOfHRGrainBytes = region_size_log; 140 141 guarantee(LogOfHRGrainWords == 0, "we should only set it once"); 142 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize; 143 144 guarantee(GrainBytes == 0, "we should only set it once"); 145 // The cast to int is safe, given that we've bounded region_size by 146 // MIN_REGION_SIZE and MAX_REGION_SIZE. 147 GrainBytes = region_size; 148 log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M); 149 150 guarantee(GrainWords == 0, "we should only set it once"); 151 GrainWords = GrainBytes >> LogHeapWordSize; 152 guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity"); 153 154 guarantee(CardsPerRegion == 0, "we should only set it once"); 155 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift; 156 157 if (G1HeapRegionSize != GrainBytes) { 158 FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes); 159 } 160 } 161 162 void HeapRegion::reset_after_compaction() { 163 G1ContiguousSpace::reset_after_compaction(); 164 // After a compaction the mark bitmap is invalid, so we must 165 // treat all objects as being inside the unmarked area. 166 zero_marked_bytes(); 167 init_top_at_mark_start(); 168 } 169 170 void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) { 171 assert(_humongous_start_region == NULL, 172 "we should have already filtered out humongous regions"); 173 assert(!in_collection_set(), 174 "Should not clear heap region %u in the collection set", hrm_index()); 175 176 set_allocation_context(AllocationContext::system()); 177 set_young_index_in_cset(-1); 178 uninstall_surv_rate_group(); 179 set_free(); 180 reset_pre_dummy_top(); 181 182 if (!keep_remset) { 183 if (locked) { 184 rem_set()->clear_locked(); 185 } else { 186 rem_set()->clear(); 187 } 188 } 189 190 zero_marked_bytes(); 191 192 init_top_at_mark_start(); 193 _gc_time_stamp = G1CollectedHeap::heap()->get_gc_time_stamp(); 194 if (clear_space) clear(SpaceDecorator::Mangle); 195 } 196 197 void HeapRegion::par_clear() { 198 assert(used() == 0, "the region should have been already cleared"); 199 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal"); 200 HeapRegionRemSet* hrrs = rem_set(); 201 hrrs->clear(); 202 CardTableModRefBS* ct_bs = 203 barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set()); 204 ct_bs->clear(MemRegion(bottom(), end())); 205 } 206 207 void HeapRegion::calc_gc_efficiency() { 208 // GC efficiency is the ratio of how much space would be 209 // reclaimed over how long we predict it would take to reclaim it. 210 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 211 G1Policy* g1p = g1h->g1_policy(); 212 213 // Retrieve a prediction of the elapsed time for this region for 214 // a mixed gc because the region will only be evacuated during a 215 // mixed gc. 216 double region_elapsed_time_ms = 217 g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */); 218 _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms; 219 } 220 221 void HeapRegion::set_free() { 222 report_region_type_change(G1HeapRegionTraceType::Free); 223 _type.set_free(); 224 } 225 226 void HeapRegion::set_eden() { 227 report_region_type_change(G1HeapRegionTraceType::Eden); 228 _type.set_eden(); 229 } 230 231 void HeapRegion::set_eden_pre_gc() { 232 report_region_type_change(G1HeapRegionTraceType::Eden); 233 _type.set_eden_pre_gc(); 234 } 235 236 void HeapRegion::set_survivor() { 237 report_region_type_change(G1HeapRegionTraceType::Survivor); 238 _type.set_survivor(); 239 } 240 241 void HeapRegion::set_old() { 242 report_region_type_change(G1HeapRegionTraceType::Old); 243 _type.set_old(); 244 } 245 246 void HeapRegion::set_archive() { 247 report_region_type_change(G1HeapRegionTraceType::Archive); 248 _type.set_archive(); 249 } 250 251 void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) { 252 assert(!is_humongous(), "sanity / pre-condition"); 253 assert(top() == bottom(), "should be empty"); 254 255 report_region_type_change(G1HeapRegionTraceType::StartsHumongous); 256 _type.set_starts_humongous(); 257 _humongous_start_region = this; 258 259 _bot_part.set_for_starts_humongous(obj_top, fill_size); 260 } 261 262 void HeapRegion::set_continues_humongous(HeapRegion* first_hr) { 263 assert(!is_humongous(), "sanity / pre-condition"); 264 assert(top() == bottom(), "should be empty"); 265 assert(first_hr->is_starts_humongous(), "pre-condition"); 266 267 report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous); 268 _type.set_continues_humongous(); 269 _humongous_start_region = first_hr; 270 } 271 272 void HeapRegion::clear_humongous() { 273 assert(is_humongous(), "pre-condition"); 274 275 assert(capacity() == HeapRegion::GrainBytes, "pre-condition"); 276 _humongous_start_region = NULL; 277 } 278 279 HeapRegion::HeapRegion(uint hrm_index, 280 G1BlockOffsetTable* bot, 281 MemRegion mr) : 282 G1ContiguousSpace(bot), 283 _hrm_index(hrm_index), 284 _allocation_context(AllocationContext::system()), 285 _humongous_start_region(NULL), 286 _evacuation_failed(false), 287 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0), 288 _next(NULL), _prev(NULL), 289 #ifdef ASSERT 290 _containing_set(NULL), 291 #endif // ASSERT 292 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), 293 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0) 294 { 295 _rem_set = new HeapRegionRemSet(bot, this); 296 297 initialize(mr); 298 } 299 300 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { 301 assert(_rem_set->is_empty(), "Remembered set must be empty"); 302 303 G1ContiguousSpace::initialize(mr, clear_space, mangle_space); 304 305 hr_clear(false /*par*/, false /*clear_space*/); 306 set_top(bottom()); 307 record_timestamp(); 308 } 309 310 void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) { 311 HeapRegionTracer::send_region_type_change(_hrm_index, 312 get_trace_type(), 313 to, 314 (uintptr_t)bottom(), 315 used(), 316 (uint)allocation_context()); 317 } 318 319 CompactibleSpace* HeapRegion::next_compaction_space() const { 320 return G1CollectedHeap::heap()->next_compaction_region(this); 321 } 322 323 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark, 324 bool during_conc_mark) { 325 // We always recreate the prev marking info and we'll explicitly 326 // mark all objects we find to be self-forwarded on the prev 327 // bitmap. So all objects need to be below PTAMS. 328 _prev_marked_bytes = 0; 329 330 if (during_initial_mark) { 331 // During initial-mark, we'll also explicitly mark all objects 332 // we find to be self-forwarded on the next bitmap. So all 333 // objects need to be below NTAMS. 334 _next_top_at_mark_start = top(); 335 _next_marked_bytes = 0; 336 } else if (during_conc_mark) { 337 // During concurrent mark, all objects in the CSet (including 338 // the ones we find to be self-forwarded) are implicitly live. 339 // So all objects need to be above NTAMS. 340 _next_top_at_mark_start = bottom(); 341 _next_marked_bytes = 0; 342 } 343 } 344 345 void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) { 346 assert(marked_bytes <= used(), 347 "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used()); 348 _prev_top_at_mark_start = top(); 349 _prev_marked_bytes = marked_bytes; 350 } 351 352 // Humongous objects are allocated directly in the old-gen. Need 353 // special handling for concurrent processing encountering an 354 // in-progress allocation. 355 static bool do_oops_on_card_in_humongous(MemRegion mr, 356 FilterOutOfRegionClosure* cl, 357 HeapRegion* hr, 358 G1CollectedHeap* g1h) { 359 assert(hr->is_humongous(), "precondition"); 360 HeapRegion* sr = hr->humongous_start_region(); 361 oop obj = oop(sr->bottom()); 362 363 // If concurrent and klass_or_null is NULL, then space has been 364 // allocated but the object has not yet been published by setting 365 // the klass. That can only happen if the card is stale. However, 366 // we've already set the card clean, so we must return failure, 367 // since the allocating thread could have performed a write to the 368 // card that might be missed otherwise. 369 if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) { 370 return false; 371 } 372 373 // We have a well-formed humongous object at the start of sr. 374 // Only filler objects follow a humongous object in the containing 375 // regions, and we can ignore those. So only process the one 376 // humongous object. 377 if (!g1h->is_obj_dead(obj, sr)) { 378 if (obj->is_objArray() || (sr->bottom() < mr.start())) { 379 // objArrays are always marked precisely, so limit processing 380 // with mr. Non-objArrays might be precisely marked, and since 381 // it's humongous it's worthwhile avoiding full processing. 382 // However, the card could be stale and only cover filler 383 // objects. That should be rare, so not worth checking for; 384 // instead let it fall out from the bounded iteration. 385 obj->oop_iterate(cl, mr); 386 } else { 387 // If obj is not an objArray and mr contains the start of the 388 // obj, then this could be an imprecise mark, and we need to 389 // process the entire object. 390 obj->oop_iterate(cl); 391 } 392 } 393 return true; 394 } 395 396 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr, 397 FilterOutOfRegionClosure* cl) { 398 assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region"); 399 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 400 401 // Special handling for humongous regions. 402 if (is_humongous()) { 403 return do_oops_on_card_in_humongous(mr, cl, this, g1h); 404 } 405 assert(is_old(), "precondition"); 406 407 // Because mr has been trimmed to what's been allocated in this 408 // region, the parts of the heap that are examined here are always 409 // parsable; there's no need to use klass_or_null to detect 410 // in-progress allocation. 411 412 // Cache the boundaries of the memory region in some const locals 413 HeapWord* const start = mr.start(); 414 HeapWord* const end = mr.end(); 415 416 // Find the obj that extends onto mr.start(). 417 // Update BOT as needed while finding start of (possibly dead) 418 // object containing the start of the region. 419 HeapWord* cur = block_start(start); 420 421 #ifdef ASSERT 422 { 423 assert(cur <= start, 424 "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start)); 425 HeapWord* next = cur + block_size(cur); 426 assert(start < next, 427 "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next)); 428 } 429 #endif 430 431 do { 432 oop obj = oop(cur); 433 assert(obj->is_oop(true), "Not an oop at " PTR_FORMAT, p2i(cur)); 434 assert(obj->klass_or_null() != NULL, 435 "Unparsable heap at " PTR_FORMAT, p2i(cur)); 436 437 if (g1h->is_obj_dead(obj, this)) { 438 // Carefully step over dead object. 439 cur += block_size(cur); 440 } else { 441 // Step over live object, and process its references. 442 cur += obj->size(); 443 // Non-objArrays are usually marked imprecise at the object 444 // start, in which case we need to iterate over them in full. 445 // objArrays are precisely marked, but can still be iterated 446 // over in full if completely covered. 447 if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) { 448 obj->oop_iterate(cl); 449 } else { 450 obj->oop_iterate(cl, mr); 451 } 452 } 453 } while (cur < end); 454 455 return true; 456 } 457 458 // Code roots support 459 460 void HeapRegion::add_strong_code_root(nmethod* nm) { 461 HeapRegionRemSet* hrrs = rem_set(); 462 hrrs->add_strong_code_root(nm); 463 } 464 465 void HeapRegion::add_strong_code_root_locked(nmethod* nm) { 466 assert_locked_or_safepoint(CodeCache_lock); 467 HeapRegionRemSet* hrrs = rem_set(); 468 hrrs->add_strong_code_root_locked(nm); 469 } 470 471 void HeapRegion::remove_strong_code_root(nmethod* nm) { 472 HeapRegionRemSet* hrrs = rem_set(); 473 hrrs->remove_strong_code_root(nm); 474 } 475 476 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const { 477 HeapRegionRemSet* hrrs = rem_set(); 478 hrrs->strong_code_roots_do(blk); 479 } 480 481 class VerifyStrongCodeRootOopClosure: public OopClosure { 482 const HeapRegion* _hr; 483 bool _failures; 484 bool _has_oops_in_region; 485 486 template <class T> void do_oop_work(T* p) { 487 T heap_oop = oopDesc::load_heap_oop(p); 488 if (!oopDesc::is_null(heap_oop)) { 489 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); 490 491 // Note: not all the oops embedded in the nmethod are in the 492 // current region. We only look at those which are. 493 if (_hr->is_in(obj)) { 494 // Object is in the region. Check that its less than top 495 if (_hr->top() <= (HeapWord*)obj) { 496 // Object is above top 497 log_error(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT, 498 p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top())); 499 _failures = true; 500 return; 501 } 502 // Nmethod has at least one oop in the current region 503 _has_oops_in_region = true; 504 } 505 } 506 } 507 508 public: 509 VerifyStrongCodeRootOopClosure(const HeapRegion* hr): 510 _hr(hr), _failures(false), _has_oops_in_region(false) {} 511 512 void do_oop(narrowOop* p) { do_oop_work(p); } 513 void do_oop(oop* p) { do_oop_work(p); } 514 515 bool failures() { return _failures; } 516 bool has_oops_in_region() { return _has_oops_in_region; } 517 }; 518 519 class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure { 520 const HeapRegion* _hr; 521 bool _failures; 522 public: 523 VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) : 524 _hr(hr), _failures(false) {} 525 526 void do_code_blob(CodeBlob* cb) { 527 nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null(); 528 if (nm != NULL) { 529 // Verify that the nemthod is live 530 if (!nm->is_alive()) { 531 log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots", 532 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); 533 _failures = true; 534 } else { 535 VerifyStrongCodeRootOopClosure oop_cl(_hr); 536 nm->oops_do(&oop_cl); 537 if (!oop_cl.has_oops_in_region()) { 538 log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region", 539 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); 540 _failures = true; 541 } else if (oop_cl.failures()) { 542 log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT, 543 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); 544 _failures = true; 545 } 546 } 547 } 548 } 549 550 bool failures() { return _failures; } 551 }; 552 553 void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const { 554 if (!G1VerifyHeapRegionCodeRoots) { 555 // We're not verifying code roots. 556 return; 557 } 558 if (vo == VerifyOption_G1UseMarkWord) { 559 // Marking verification during a full GC is performed after class 560 // unloading, code cache unloading, etc so the strong code roots 561 // attached to each heap region are in an inconsistent state. They won't 562 // be consistent until the strong code roots are rebuilt after the 563 // actual GC. Skip verifying the strong code roots in this particular 564 // time. 565 assert(VerifyDuringGC, "only way to get here"); 566 return; 567 } 568 569 HeapRegionRemSet* hrrs = rem_set(); 570 size_t strong_code_roots_length = hrrs->strong_code_roots_list_length(); 571 572 // if this region is empty then there should be no entries 573 // on its strong code root list 574 if (is_empty()) { 575 if (strong_code_roots_length > 0) { 576 log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries", 577 p2i(bottom()), p2i(end()), strong_code_roots_length); 578 *failures = true; 579 } 580 return; 581 } 582 583 if (is_continues_humongous()) { 584 if (strong_code_roots_length > 0) { 585 log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries", 586 HR_FORMAT_PARAMS(this), strong_code_roots_length); 587 *failures = true; 588 } 589 return; 590 } 591 592 VerifyStrongCodeRootCodeBlobClosure cb_cl(this); 593 strong_code_roots_do(&cb_cl); 594 595 if (cb_cl.failures()) { 596 *failures = true; 597 } 598 } 599 600 void HeapRegion::print() const { print_on(tty); } 601 void HeapRegion::print_on(outputStream* st) const { 602 st->print("|%4u", this->_hrm_index); 603 st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT, 604 p2i(bottom()), p2i(top()), p2i(end())); 605 st->print("|%3d%%", (int) ((double) used() * 100 / capacity())); 606 st->print("|%2s", get_short_type_str()); 607 if (in_collection_set()) { 608 st->print("|CS"); 609 } else { 610 st->print("| "); 611 } 612 st->print("|TS%3u", _gc_time_stamp); 613 st->print("|AC%3u", allocation_context()); 614 st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "|", 615 p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start())); 616 } 617 618 class G1VerificationClosure : public OopClosure { 619 protected: 620 G1CollectedHeap* _g1h; 621 CardTableModRefBS* _bs; 622 oop _containing_obj; 623 bool _failures; 624 int _n_failures; 625 VerifyOption _vo; 626 public: 627 // _vo == UsePrevMarking -> use "prev" marking information, 628 // _vo == UseNextMarking -> use "next" marking information, 629 // _vo == UseMarkWord -> use mark word from object header. 630 G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) : 631 _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())), 632 _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) { 633 } 634 635 void set_containing_obj(oop obj) { 636 _containing_obj = obj; 637 } 638 639 bool failures() { return _failures; } 640 int n_failures() { return _n_failures; } 641 642 void print_object(outputStream* out, oop obj) { 643 #ifdef PRODUCT 644 Klass* k = obj->klass(); 645 const char* class_name = k->external_name(); 646 out->print_cr("class name %s", class_name); 647 #else // PRODUCT 648 obj->print_on(out); 649 #endif // PRODUCT 650 } 651 }; 652 653 class VerifyLiveClosure : public G1VerificationClosure { 654 public: 655 VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {} 656 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 657 virtual void do_oop(oop* p) { do_oop_work(p); } 658 659 template <class T> 660 void do_oop_work(T* p) { 661 assert(_containing_obj != NULL, "Precondition"); 662 assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo), 663 "Precondition"); 664 verify_liveness(p); 665 } 666 667 template <class T> 668 void verify_liveness(T* p) { 669 T heap_oop = oopDesc::load_heap_oop(p); 670 Log(gc, verify) log; 671 if (!oopDesc::is_null(heap_oop)) { 672 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); 673 bool failed = false; 674 if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) { 675 MutexLockerEx x(ParGCRareEvent_lock, 676 Mutex::_no_safepoint_check_flag); 677 678 if (!_failures) { 679 log.error("----------"); 680 } 681 ResourceMark rm; 682 if (!_g1h->is_in_closed_subset(obj)) { 683 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); 684 log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")", 685 p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end())); 686 print_object(log.error_stream(), _containing_obj); 687 log.error("points to obj " PTR_FORMAT " not in the heap", p2i(obj)); 688 } else { 689 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); 690 HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj); 691 log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")", 692 p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end())); 693 print_object(log.error_stream(), _containing_obj); 694 log.error("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")", 695 p2i(obj), p2i(to->bottom()), p2i(to->end())); 696 print_object(log.error_stream(), obj); 697 } 698 log.error("----------"); 699 _failures = true; 700 failed = true; 701 _n_failures++; 702 } 703 } 704 } 705 }; 706 707 class VerifyRemSetClosure : public G1VerificationClosure { 708 public: 709 VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {} 710 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 711 virtual void do_oop(oop* p) { do_oop_work(p); } 712 713 template <class T> 714 void do_oop_work(T* p) { 715 assert(_containing_obj != NULL, "Precondition"); 716 assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo), 717 "Precondition"); 718 verify_remembered_set(p); 719 } 720 721 template <class T> 722 void verify_remembered_set(T* p) { 723 T heap_oop = oopDesc::load_heap_oop(p); 724 Log(gc, verify) log; 725 if (!oopDesc::is_null(heap_oop)) { 726 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); 727 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); 728 HeapRegion* to = _g1h->heap_region_containing(obj); 729 if (from != NULL && to != NULL && 730 from != to && 731 !to->is_pinned()) { 732 jbyte cv_obj = *_bs->byte_for_const(_containing_obj); 733 jbyte cv_field = *_bs->byte_for_const(p); 734 const jbyte dirty = CardTableModRefBS::dirty_card_val(); 735 736 bool is_bad = !(from->is_young() 737 || to->rem_set()->contains_reference(p) 738 || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed 739 (_containing_obj->is_objArray() ? 740 cv_field == dirty 741 : cv_obj == dirty || cv_field == dirty)); 742 if (is_bad) { 743 MutexLockerEx x(ParGCRareEvent_lock, 744 Mutex::_no_safepoint_check_flag); 745 746 if (!_failures) { 747 log.error("----------"); 748 } 749 log.error("Missing rem set entry:"); 750 log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT, 751 p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from)); 752 ResourceMark rm; 753 _containing_obj->print_on(log.error_stream()); 754 log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to)); 755 if (obj->is_oop()) { 756 obj->print_on(log.error_stream()); 757 } 758 log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field); 759 log.error("----------"); 760 _failures = true; 761 _n_failures++; 762 } 763 } 764 } 765 } 766 }; 767 768 // This really ought to be commoned up into OffsetTableContigSpace somehow. 769 // We would need a mechanism to make that code skip dead objects. 770 771 void HeapRegion::verify(VerifyOption vo, 772 bool* failures) const { 773 G1CollectedHeap* g1 = G1CollectedHeap::heap(); 774 *failures = false; 775 HeapWord* p = bottom(); 776 HeapWord* prev_p = NULL; 777 VerifyLiveClosure vl_cl(g1, vo); 778 VerifyRemSetClosure vr_cl(g1, vo); 779 bool is_region_humongous = is_humongous(); 780 size_t object_num = 0; 781 while (p < top()) { 782 oop obj = oop(p); 783 size_t obj_size = block_size(p); 784 object_num += 1; 785 786 if (!g1->is_obj_dead_cond(obj, this, vo)) { 787 if (obj->is_oop()) { 788 Klass* klass = obj->klass(); 789 bool is_metaspace_object = Metaspace::contains(klass) || 790 (vo == VerifyOption_G1UsePrevMarking && 791 ClassLoaderDataGraph::unload_list_contains(klass)); 792 if (!is_metaspace_object) { 793 log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " " 794 "not metadata", p2i(klass), p2i(obj)); 795 *failures = true; 796 return; 797 } else if (!klass->is_klass()) { 798 log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " " 799 "not a klass", p2i(klass), p2i(obj)); 800 *failures = true; 801 return; 802 } else { 803 vl_cl.set_containing_obj(obj); 804 if (!g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) { 805 // verify liveness and rem_set 806 vr_cl.set_containing_obj(obj); 807 G1Mux2Closure mux(&vl_cl, &vr_cl); 808 obj->oop_iterate_no_header(&mux); 809 810 if (vr_cl.failures()) { 811 *failures = true; 812 } 813 if (G1MaxVerifyFailures >= 0 && 814 vr_cl.n_failures() >= G1MaxVerifyFailures) { 815 return; 816 } 817 } else { 818 // verify only liveness 819 obj->oop_iterate_no_header(&vl_cl); 820 } 821 if (vl_cl.failures()) { 822 *failures = true; 823 } 824 if (G1MaxVerifyFailures >= 0 && 825 vl_cl.n_failures() >= G1MaxVerifyFailures) { 826 return; 827 } 828 } 829 } else { 830 log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj)); 831 *failures = true; 832 return; 833 } 834 } 835 prev_p = p; 836 p += obj_size; 837 } 838 839 if (!is_young() && !is_empty()) { 840 _bot_part.verify(); 841 } 842 843 if (is_region_humongous) { 844 oop obj = oop(this->humongous_start_region()->bottom()); 845 if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) { 846 log_error(gc, verify)("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj)); 847 *failures = true; 848 return; 849 } 850 } 851 852 if (!is_region_humongous && p != top()) { 853 log_error(gc, verify)("end of last object " PTR_FORMAT " " 854 "does not match top " PTR_FORMAT, p2i(p), p2i(top())); 855 *failures = true; 856 return; 857 } 858 859 HeapWord* the_end = end(); 860 // Do some extra BOT consistency checking for addresses in the 861 // range [top, end). BOT look-ups in this range should yield 862 // top. No point in doing that if top == end (there's nothing there). 863 if (p < the_end) { 864 // Look up top 865 HeapWord* addr_1 = p; 866 HeapWord* b_start_1 = _bot_part.block_start_const(addr_1); 867 if (b_start_1 != p) { 868 log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " " 869 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT, 870 p2i(addr_1), p2i(b_start_1), p2i(p)); 871 *failures = true; 872 return; 873 } 874 875 // Look up top + 1 876 HeapWord* addr_2 = p + 1; 877 if (addr_2 < the_end) { 878 HeapWord* b_start_2 = _bot_part.block_start_const(addr_2); 879 if (b_start_2 != p) { 880 log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " " 881 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT, 882 p2i(addr_2), p2i(b_start_2), p2i(p)); 883 *failures = true; 884 return; 885 } 886 } 887 888 // Look up an address between top and end 889 size_t diff = pointer_delta(the_end, p) / 2; 890 HeapWord* addr_3 = p + diff; 891 if (addr_3 < the_end) { 892 HeapWord* b_start_3 = _bot_part.block_start_const(addr_3); 893 if (b_start_3 != p) { 894 log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " " 895 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT, 896 p2i(addr_3), p2i(b_start_3), p2i(p)); 897 *failures = true; 898 return; 899 } 900 } 901 902 // Look up end - 1 903 HeapWord* addr_4 = the_end - 1; 904 HeapWord* b_start_4 = _bot_part.block_start_const(addr_4); 905 if (b_start_4 != p) { 906 log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " " 907 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT, 908 p2i(addr_4), p2i(b_start_4), p2i(p)); 909 *failures = true; 910 return; 911 } 912 } 913 914 verify_strong_code_roots(vo, failures); 915 } 916 917 void HeapRegion::verify() const { 918 bool dummy = false; 919 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); 920 } 921 922 void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const { 923 G1CollectedHeap* g1 = G1CollectedHeap::heap(); 924 *failures = false; 925 HeapWord* p = bottom(); 926 HeapWord* prev_p = NULL; 927 VerifyRemSetClosure vr_cl(g1, vo); 928 while (p < top()) { 929 oop obj = oop(p); 930 size_t obj_size = block_size(p); 931 932 if (!g1->is_obj_dead_cond(obj, this, vo)) { 933 if (obj->is_oop()) { 934 vr_cl.set_containing_obj(obj); 935 obj->oop_iterate_no_header(&vr_cl); 936 937 if (vr_cl.failures()) { 938 *failures = true; 939 } 940 if (G1MaxVerifyFailures >= 0 && 941 vr_cl.n_failures() >= G1MaxVerifyFailures) { 942 return; 943 } 944 } else { 945 log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj)); 946 *failures = true; 947 return; 948 } 949 } 950 951 prev_p = p; 952 p += obj_size; 953 } 954 } 955 956 void HeapRegion::verify_rem_set() const { 957 bool failures = false; 958 verify_rem_set(VerifyOption_G1UsePrevMarking, &failures); 959 guarantee(!failures, "HeapRegion RemSet verification failed"); 960 } 961 962 void HeapRegion::prepare_for_compaction(CompactPoint* cp) { 963 scan_and_forward(this, cp); 964 } 965 966 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go 967 // away eventually. 968 969 void G1ContiguousSpace::clear(bool mangle_space) { 970 set_top(bottom()); 971 _scan_top = bottom(); 972 CompactibleSpace::clear(mangle_space); 973 reset_bot(); 974 } 975 976 #ifndef PRODUCT 977 void G1ContiguousSpace::mangle_unused_area() { 978 mangle_unused_area_complete(); 979 } 980 981 void G1ContiguousSpace::mangle_unused_area_complete() { 982 SpaceMangler::mangle_region(MemRegion(top(), end())); 983 } 984 #endif 985 986 void G1ContiguousSpace::print() const { 987 print_short(); 988 tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " 989 INTPTR_FORMAT ", " INTPTR_FORMAT ")", 990 p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end())); 991 } 992 993 HeapWord* G1ContiguousSpace::initialize_threshold() { 994 return _bot_part.initialize_threshold(); 995 } 996 997 HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start, 998 HeapWord* end) { 999 _bot_part.alloc_block(start, end); 1000 return _bot_part.threshold(); 1001 } 1002 1003 HeapWord* G1ContiguousSpace::scan_top() const { 1004 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1005 HeapWord* local_top = top(); 1006 OrderAccess::loadload(); 1007 const unsigned local_time_stamp = _gc_time_stamp; 1008 assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant"); 1009 if (local_time_stamp < g1h->get_gc_time_stamp()) { 1010 return local_top; 1011 } else { 1012 return _scan_top; 1013 } 1014 } 1015 1016 void G1ContiguousSpace::record_timestamp() { 1017 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1018 uint curr_gc_time_stamp = g1h->get_gc_time_stamp(); 1019 1020 if (_gc_time_stamp < curr_gc_time_stamp) { 1021 // Setting the time stamp here tells concurrent readers to look at 1022 // scan_top to know the maximum allowed address to look at. 1023 1024 // scan_top should be bottom for all regions except for the 1025 // retained old alloc region which should have scan_top == top 1026 HeapWord* st = _scan_top; 1027 guarantee(st == _bottom || st == _top, "invariant"); 1028 1029 _gc_time_stamp = curr_gc_time_stamp; 1030 } 1031 } 1032 1033 void G1ContiguousSpace::record_retained_region() { 1034 // scan_top is the maximum address where it's safe for the next gc to 1035 // scan this region. 1036 _scan_top = top(); 1037 } 1038 1039 void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) { 1040 object_iterate(blk); 1041 } 1042 1043 void G1ContiguousSpace::object_iterate(ObjectClosure* blk) { 1044 HeapWord* p = bottom(); 1045 while (p < top()) { 1046 if (block_is_obj(p)) { 1047 blk->do_object(oop(p)); 1048 } 1049 p += block_size(p); 1050 } 1051 } 1052 1053 G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) : 1054 _bot_part(bot, this), 1055 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true), 1056 _gc_time_stamp(0) 1057 { 1058 } 1059 1060 void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) { 1061 CompactibleSpace::initialize(mr, clear_space, mangle_space); 1062 _top = bottom(); 1063 _scan_top = bottom(); 1064 set_saved_mark_word(NULL); 1065 reset_bot(); 1066 } 1067