/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionTracer.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
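  // For illustration (assuming the usual bounds of 1M..32M and a target of
  // 2048 regions): with -Xms2g -Xmx8g the average heap size is 5G, 5G / 2048
  // is ~2.5M, and the largest power of two not above that is 2M, so
  // region_size_log would be 21 and the globals below would describe 2M regions.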
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = region_size;
  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;

  if (G1HeapRegionSize != GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
  }
}

void HeapRegion::reset_after_compaction() {
  G1ContiguousSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!keep_remset) {
    if (locked) {
      rem_set()->clear_locked();
    } else {
      rem_set()->clear();
    }
  }

  zero_marked_bytes();

  init_top_at_mark_start();
  _gc_time_stamp = G1CollectedHeap::heap()->get_gc_time_stamp();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
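  // In other words, the value computed below is
  //   gc_efficiency = reclaimable_bytes / predicted_region_elapsed_time_ms,
  // i.e. bytes expected to be reclaimed per millisecond of predicted
  // evacuation time.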
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::move_to_old() {
  if (_type.relabel_as_old()) {
    report_region_type_change(G1HeapRegionTraceType::Old);
  }
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}

void HeapRegion::set_open_archive() {
  report_region_type_change(G1HeapRegionTraceType::OpenArchive);
  _type.set_open_archive();
}

void HeapRegion::set_closed_archive() {
  report_region_type_change(G1HeapRegionTraceType::ClosedArchive);
  _type.set_closed_archive();
}

void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  _bot_part.set_for_starts_humongous(obj_top, fill_size);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;

  _bot_part.set_object_can_span(true);
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;

  _bot_part.set_object_can_span(false);
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetTable* bot,
                       MemRegion mr) :
    G1ContiguousSpace(bot),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
{
  _rem_set = new HeapRegionRemSet(bot, this);

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}

void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used(),
                                            (uint)allocation_context());
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region.
        // Check that it's less than top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          log_error(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
                                p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
                            p2i(bottom()), p2i(end()), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("|%4u", this->_hrm_index);
  st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|%3d%%", (int) ((double) used() * 100 / capacity()));
  st->print("|%2s", get_short_type_str());
  if (in_collection_set()) {
    st->print("|CS");
  } else {
    st->print("|  ");
  }
  st->print("|TS%3u", _gc_time_stamp);
  st->print("|AC%3u", allocation_context());
  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "|",
               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
}

class G1VerificationClosure : public OopClosure {
protected:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = k->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }
};

class VerifyLiveClosure : public G1VerificationClosure {
public:
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_liveness(p);
  }

  template <class T>
  void verify_liveness(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    Log(gc, verify) log;
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          log.error("----------");
        }
        ResourceMark rm;
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          log.error("points to obj " PTR_FORMAT " not in the heap", p2i(obj));
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          log.error("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(obj), p2i(to->bottom()), p2i(to->end()));
          print_object(&ls, obj);
        }
        log.error("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }
    }
  }
};

class VerifyRemSetClosure : public G1VerificationClosure {
public:
  VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_remembered_set(p);
  }

  template <class T>
  void verify_remembered_set(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    Log(gc, verify) log;
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->is_pinned()) {
        jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
        jbyte cv_field = *_bs->byte_for_const(p);
        const jbyte dirty = CardTableModRefBS::dirty_card_val();

        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || (_containing_obj->is_objArray() ?
                            cv_field == dirty :
                            cv_obj == dirty || cv_field == dirty));
        if (is_bad) {
          MutexLockerEx x(ParGCRareEvent_lock,
                          Mutex::_no_safepoint_check_flag);

          if (!_failures) {
            log.error("----------");
          }
          log.error("Missing rem set entry:");
          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          ResourceMark rm;
          LogStream ls(log.error());
          _containing_obj->print_on(&ls);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
          if (oopDesc::is_oop(obj)) {
            obj->print_on(&ls);
          }
          log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
          log.error("----------");
          _failures = true;
          _n_failures++;
        }
      }
    }
  }
};

// Closure that applies the given two closures in sequence.
class G1Mux2Closure : public OopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure *c1, OopClosure *c2) { _c1 = c1; _c2 = c2; }
  template <class T> inline void do_oop_work(T* p) {
    // Apply first closure; then apply the second.
    _c1->do_oop(p);
    _c2->do_oop(p);
  }
  virtual inline void do_oop(oop* p) { do_oop_work(p); }
  virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
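
// HeapRegion::verify below walks every object in [bottom, top): it checks that
// each object's klass is valid metaspace metadata and a real Klass, that the
// object itself is a well-formed oop, and then applies VerifyLiveClosure and
// VerifyRemSetClosure to its fields (remembered-set checks are skipped during
// a full collection unless G1VerifyRSetsDuringFullGC is set). Afterwards it
// cross-checks the block offset table and the strong code roots.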

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  VerifyRemSetClosure vr_cl(g1, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not metadata", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not a klass", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate_no_header(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate_no_header(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _bot_part.verify();
  }

  if (is_region_humongous) {
    oop obj = oop(this->humongous_start_region()->bottom());
    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
      log_error(gc, verify)("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
      *failures = true;
      return;
    }
  }

  if (!is_region_humongous && p != top()) {
    log_error(gc, verify)("end of last object " PTR_FORMAT " "
                          "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
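  // The probes below look up top, top + 1, a point roughly halfway between
  // top and end, and end - 1, and expect every one of them to resolve back
  // to top.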
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
    if (b_start_1 != p) {
      log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
      if (b_start_2 != p) {
        log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
      if (b_start_3 != p) {
        log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
    if (b_start_4 != p) {
      log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyRemSetClosure vr_cl(g1, vo);
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        vr_cl.set_containing_obj(obj);
        obj->oop_iterate_no_header(&vr_cl);

        if (vr_cl.failures()) {
          *failures = true;
        }
        if (G1MaxVerifyFailures >= 0 &&
            vr_cl.n_failures() >= G1MaxVerifyFailures) {
          return;
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }

    prev_p = p;
    p += obj_size;
  }
}

void HeapRegion::verify_rem_set() const {
  bool failures = false;
  verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
  guarantee(!failures, "HeapRegion RemSet verification failed");
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.
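// The methods below maintain the per-region block offset table (_bot_part)
// and the GC time stamp: clear() and initialize() reset them, cross_threshold()
// records a newly allocated block in the BOT and returns the next threshold,
// and record_timestamp() brings the region's time stamp up to the heap's
// current one.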

void G1ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1ContiguousSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1ContiguousSpace::print() const {
  print_short();
  tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end()));
}

HeapWord* G1ContiguousSpace::initialize_threshold() {
  return _bot_part.initialize_threshold();
}

HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start,
                                             HeapWord* end) {
  _bot_part.alloc_block(start, end);
  return _bot_part.threshold();
}

void G1ContiguousSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1ContiguousSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
  _bot_part(bot, this),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
}

void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}