/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionTracer.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/growableArray.hpp"

int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
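  // Each of the globals below is set exactly once during initialization.
  // Sizing sketch (assuming the default HeapRegionBounds target of 2048
  // regions and its [1M, 32M] bounds): with -Xms512m -Xmx8g the average
  // heap size is 4352M, 4352M / 2048 ~= 2.125M, and the power-of-2
  // rounding above yields a 2M region size.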
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // region_size has already been clamped to
  // [HeapRegionBounds::min_size(), HeapRegionBounds::max_size()] above.
  GrainBytes = region_size;
  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> G1CardTable::card_shift;

  if (G1HeapRegionSize != GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
  }
}

void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!keep_remset) {
    if (locked) {
      rem_set()->clear_locked();
    } else {
      rem_set()->clear();
    }
  }

  zero_marked_bytes();

  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::clear_cardtable() {
  G1CardTable* ct = G1CollectedHeap::heap()->card_table();
  ct->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* policy = g1h->policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
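  // The resulting _gc_efficiency is reclaimable bytes per millisecond of
  // predicted pause time; the policy uses it to rank old regions when
  // assembling mixed collection sets.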
  double region_elapsed_time_ms =
    policy->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::move_to_old() {
  if (_type.relabel_as_old()) {
    report_region_type_change(G1HeapRegionTraceType::Old);
  }
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}

void HeapRegion::set_open_archive() {
  report_region_type_change(G1HeapRegionTraceType::OpenArchive);
  _type.set_open_archive();
}

void HeapRegion::set_closed_archive() {
  report_region_type_change(G1HeapRegionTraceType::ClosedArchive);
  _type.set_closed_archive();
}

void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  _bot_part.set_for_starts_humongous(obj_top, fill_size);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;

  _bot_part.set_object_can_span(true);
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;

  _bot_part.set_object_can_span(false);
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetTable* bot,
                       MemRegion mr) :
    G1ContiguousSpace(bot),
    _rem_set(NULL),
    _hrm_index(hrm_index),
    _type(),
    _humongous_start_region(NULL),
    _evacuation_failed(false),
    _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _index_in_opt_cset(G1OptionalCSet::InvalidCSetIndex), _young_index_in_cset(-1),
    _surv_rate_group(NULL), _age_index(-1),
    _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
{
  _rem_set = new HeapRegionRemSet(bot, this);

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*keep_remset*/, false /*clear_space*/);
  set_top(bottom());
}

void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
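  // Hand the transition to HeapRegionTracer, which posts a JFR event
  // (when that event is enabled) recording the old and new region types.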
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used());
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          log_error(gc, verify)("Object " PTR_FORMAT " in region " HR_FORMAT " is above top ",
                                p2i(obj), HR_FORMAT_PARAMS(_hr));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }

  bool failures() { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null();
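    // Strong code roots are always nmethods; any other kind of code blob
    // carries no oops for us to verify and is skipped below.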
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseFullMarking) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // If this region is empty then there should be no entries
  // on its strong code root list.
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region " HR_FORMAT " is empty but has " SIZE_FORMAT " code root entries",
                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("|%4u", this->_hrm_index);
  st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|%3d%%", (int) ((double) used() * 100 / capacity()));
  st->print("|%2s", get_short_type_str());
  if (in_collection_set()) {
    st->print("|CS");
  } else {
    st->print("|  ");
  }
  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
}

class G1VerificationClosure : public BasicOopIterateClosure {
protected:
  G1CollectedHeap* _g1h;
  G1CardTable *_ct;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _ct(g1h->card_table()),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = k->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  // This closure provides its own oop verification code.
  debug_only(virtual bool should_verify_oops() { return false; })
};

class VerifyLiveClosure : public G1VerificationClosure {
public:
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_liveness(p);
  }

  template <class T>
  void verify_liveness(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    Log(gc, verify) log;
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          log.error("----------");
        }
        ResourceMark rm;
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          HeapRegion* const to = _g1h->heap_region_containing(obj);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          log.error("points to dead obj " PTR_FORMAT " in region " HR_FORMAT,
                    p2i(obj), HR_FORMAT_PARAMS(to));
          print_object(&ls, obj);
        }
        log.error("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }
    }
  }
};

class VerifyRemSetClosure : public G1VerificationClosure {
public:
  VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
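    // Check that a cross-region reference from this object is either
    // recorded in the destination region's remembered set or still covered
    // by a dirty card that refinement has not processed yet.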
    verify_remembered_set(p);
  }

  template <class T>
  void verify_remembered_set(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    Log(gc, verify) log;
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->is_pinned() &&
          to->rem_set()->is_complete()) {
        jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
        jbyte cv_field = *_ct->byte_for_const(p);
        const jbyte dirty = G1CardTable::dirty_card_val();

        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || (_containing_obj->is_objArray() ?
                              cv_field == dirty :
                              cv_obj == dirty || cv_field == dirty));
        if (is_bad) {
          MutexLockerEx x(ParGCRareEvent_lock,
                          Mutex::_no_safepoint_check_flag);

          if (!_failures) {
            log.error("----------");
          }
          log.error("Missing rem set entry:");
          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT " in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          ResourceMark rm;
          LogStream ls(log.error());
          _containing_obj->print_on(&ls);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
          if (oopDesc::is_oop(obj)) {
            obj->print_on(&ls);
          }
          log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
          log.error("----------");
          _failures = true;
          _n_failures++;
        }
      }
    }
  }
};

// Closure that applies the given two closures in sequence.
class G1Mux2Closure : public BasicOopIterateClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure *c1, OopClosure *c2) { _c1 = c1; _c2 = c2; }
  template <class T> inline void do_oop_work(T* p) {
    // Apply first closure; then apply the second.
    _c1->do_oop(p);
    _c2->do_oop(p);
  }
  virtual inline void do_oop(oop* p) { do_oop_work(p); }
  virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }

  // This closure provides its own oop verification code.
  debug_only(virtual bool should_verify_oops() { return false; })
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
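
// Walks the region object by object from bottom() to top(), checking that
// each object's klass is valid metaspace metadata and, depending on the
// collector state and flags, that liveness and remembered set invariants
// hold. Verification returns early once G1MaxVerifyFailures (if
// non-negative) failures have been reported.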
void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1h, vo);
  VerifyRemSetClosure vr_cl(g1h, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass);
        if (!is_metaspace_object) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not metadata", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not a klass", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1h->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _bot_part.verify();
  }

  if (is_region_humongous) {
    oop obj = oop(this->humongous_start_region()->bottom());
    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
      log_error(gc, verify)("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
      *failures = true;
      return;
    }
  }

  if (!is_region_humongous && p != top()) {
    log_error(gc, verify)("end of last object " PTR_FORMAT " "
                          "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
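  // Probe top, top + 1, the midpoint of [top, end), and end - 1; each
  // block_start_const() look-up below is expected to return top.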
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
    if (b_start_1 != p) {
      log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
      if (b_start_2 != p) {
        log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
      if (b_start_3 != p) {
        log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
    if (b_start_4 != p) {
      log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyRemSetClosure vr_cl(g1h, vo);
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);

    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        vr_cl.set_containing_obj(obj);
        obj->oop_iterate(&vr_cl);

        if (vr_cl.failures()) {
          *failures = true;
        }
        if (G1MaxVerifyFailures >= 0 &&
            vr_cl.n_failures() >= G1MaxVerifyFailures) {
          return;
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }

    prev_p = p;
    p += obj_size;
  }
}

void HeapRegion::verify_rem_set() const {
  bool failures = false;
  verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
  guarantee(!failures, "HeapRegion RemSet verification failed");
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  // Not used for G1 anymore, but pure virtual in Space.
  ShouldNotReachHere();
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.
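
// clear() logically empties the space: top is reset back to bottom and the
// block offset table is reinitialized; mangling of the freed range is
// delegated to CompactibleSpace::clear().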
void G1ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1ContiguousSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1ContiguousSpace::print() const {
  print_short();
  tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end()));
}

HeapWord* G1ContiguousSpace::initialize_threshold() {
  return _bot_part.initialize_threshold();
}

HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start,
                                             HeapWord* end) {
  _bot_part.alloc_block(start, end);
  return _bot_part.threshold();
}

void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1ContiguousSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
  _top(NULL),
  _bot_part(bot, this),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _pre_dummy_top(NULL)
{
}

void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}