/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionTracer.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/growableArray.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);
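  // Illustrative example (numbers are assumptions, not taken from this
  // file): with the flag left at its default, initial_heap_size = 2G and
  // max_heap_size = 8G average to 5G; dividing by a target region count of
  // 2048 gives ~2.5M, which the power-of-two rounding above turns into 2M.
  // 2M already lies within the min/max bounds, so it is used unchanged.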
  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // region_size has already been bounded by HeapRegionBounds::min_size()
  // and HeapRegionBounds::max_size() above.
  GrainBytes = region_size;
  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> G1CardTable::card_shift;

  if (G1HeapRegionSize != GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
  }
}

void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!keep_remset) {
    if (locked) {
      rem_set()->clear_locked();
    } else {
      rem_set()->clear();
    }
  }

  zero_marked_bytes();

  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::clear_cardtable() {
  G1CardTable* ct = G1CollectedHeap::heap()->card_table();
  ct->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
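  // Worked example (illustrative numbers only): a region with
  // reclaimable_bytes() == 4M and a predicted evacuation time of 2.0ms
  // scores a _gc_efficiency of ~2M bytes per ms; regions with higher
  // values are cheaper to reclaim per unit of pause time.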
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::move_to_old() {
  if (_type.relabel_as_old()) {
    report_region_type_change(G1HeapRegionTraceType::Old);
  }
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}

void HeapRegion::set_open_archive() {
  report_region_type_change(G1HeapRegionTraceType::OpenArchive);
  _type.set_open_archive();
}

void HeapRegion::set_closed_archive() {
  report_region_type_change(G1HeapRegionTraceType::ClosedArchive);
  _type.set_closed_archive();
}

void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  _bot_part.set_for_starts_humongous(obj_top, fill_size);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;

  _bot_part.set_object_can_span(true);
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;

  _bot_part.set_object_can_span(false);
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetTable* bot,
                       MemRegion mr) :
    G1ContiguousSpace(bot),
    _rem_set(NULL),
    _hrm_index(hrm_index),
    _type(),
    _humongous_start_region(NULL),
    _evacuation_failed(false),
    _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
{
  _rem_set = new HeapRegionRemSet(bot, this);

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*keep_remset*/, false /*clear_space*/);
  set_top(bottom());
}

void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used());
}
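// In the comments below, PTAMS and NTAMS abbreviate the previous and next
// top-at-mark-start pointers (_prev_top_at_mark_start and
// _next_top_at_mark_start): objects below PTAMS are covered by the "prev"
// marking bitmap, objects below NTAMS by the "next" one.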
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          log_error(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
                                p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};
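// The CodeBlob closure below drives the oop closure above: for every
// nmethod on a region's strong code roots list it checks that the nmethod
// is still alive and that at least one of its embedded oops actually
// points into the region (otherwise the list entry is stale).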
class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseFullMarking) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc. so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // If this region is empty then there should be no entries
  // on its strong code root list.
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
                            p2i(bottom()), p2i(end()), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(tty); }

void HeapRegion::print_on(outputStream* st) const {
  st->print("|%4u", this->_hrm_index);
  st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|%3d%%", (int) ((double) used() * 100 / capacity()));
  st->print("|%2s", get_short_type_str());
  if (in_collection_set()) {
    st->print("|CS");
  } else {
    st->print("|  ");
  }
  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
}
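// Example of a row produced by print_on() (illustrative values only; the
// final column is whatever rem_set()->get_state_str() reports):
//   |  12|0x00000000f0000000, 0x00000000f0080000, 0x00000000f0100000| 50%| O|  |TAMS 0x00000000f0000000, 0x00000000f0000000| Complete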
class G1VerificationClosure : public BasicOopIterateClosure {
protected:
  G1CollectedHeap* _g1h;
  G1CardTable* _ct;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _ct(g1h->card_table()),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = k->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  // This closure provides its own oop verification code.
  debug_only(virtual bool should_verify_oops() { return false; })
};

class VerifyLiveClosure : public G1VerificationClosure {
public:
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_liveness(p);
  }

  template <class T>
  void verify_liveness(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    Log(gc, verify) log;
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLocker x(ParGCRareEvent_lock);

        if (!_failures) {
          log.error("----------");
        }
        ResourceMark rm;
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          HeapRegion* const to = _g1h->heap_region_containing(obj);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          log.error("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(obj), p2i(to->bottom()), p2i(to->end()));
          print_object(&ls, obj);
        }
        log.error("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }
    }
  }
};
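// VerifyRemSetClosure checks the remembered set invariant for every
// cross-region reference it visits: if an object in region A points into a
// distinct region B (B not pinned, B's remset complete), then either A is
// young, the relevant card is dirty, or B's remembered set must record the
// reference (see the is_bad computation below).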
class VerifyRemSetClosure : public G1VerificationClosure {
public:
  VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_remembered_set(p);
  }

  template <class T>
  void verify_remembered_set(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    Log(gc, verify) log;
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->is_pinned() &&
          to->rem_set()->is_complete()) {
        jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
        jbyte cv_field = *_ct->byte_for_const(p);
        const jbyte dirty = G1CardTable::dirty_card_val();

        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || (_containing_obj->is_objArray() ?
                              cv_field == dirty :
                              cv_obj == dirty || cv_field == dirty));
        if (is_bad) {
          MutexLocker x(ParGCRareEvent_lock);

          if (!_failures) {
            log.error("----------");
          }
          log.error("Missing rem set entry:");
          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          ResourceMark rm;
          LogStream ls(log.error());
          _containing_obj->print_on(&ls);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
          if (oopDesc::is_oop(obj)) {
            obj->print_on(&ls);
          }
          log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
          log.error("----------");
          _failures = true;
          _n_failures++;
        }
      }
    }
  }
};

// Closure that applies the given two closures in sequence.
class G1Mux2Closure : public BasicOopIterateClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure* c1, OopClosure* c2) { _c1 = c1; _c2 = c2; }
  template <class T> inline void do_oop_work(T* p) {
    // Apply first closure; then apply the second.
    _c1->do_oop(p);
    _c2->do_oop(p);
  }
  virtual inline void do_oop(oop* p) { do_oop_work(p); }
  virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }

  // This closure provides its own oop verification code.
  debug_only(virtual bool should_verify_oops() { return false; })
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
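// verify() walks every live object in [bottom, top): the klass pointer
// must be metadata and a valid Klass, each reference is checked with
// VerifyLiveClosure (and, outside of a full GC or when
// G1VerifyRSetsDuringFullGC is set, with VerifyRemSetClosure as well), and
// the BOT must agree with the object layout. Verification bails out early
// once G1MaxVerifyFailures failures have been seen.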
void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1h, vo);
  VerifyRemSetClosure vr_cl(g1h, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass);
        if (!is_metaspace_object) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not metadata", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not a klass", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1h->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _bot_part.verify();
  }

  if (is_region_humongous) {
    oop obj = oop(this->humongous_start_region()->bottom());
    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
      log_error(gc, verify)("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
      *failures = true;
      return;
    }
  }

  if (!is_region_humongous && p != top()) {
    log_error(gc, verify)("end of last object " PTR_FORMAT " "
                          "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
    if (b_start_1 != p) {
      log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
      if (b_start_2 != p) {
        log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
      if (b_start_3 != p) {
        log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
    if (b_start_4 != p) {
      log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyRemSetClosure vr_cl(g1h, vo);
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);

    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        vr_cl.set_containing_obj(obj);
        obj->oop_iterate(&vr_cl);

        if (vr_cl.failures()) {
          *failures = true;
        }
        if (G1MaxVerifyFailures >= 0 &&
            vr_cl.n_failures() >= G1MaxVerifyFailures) {
          return;
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }

    prev_p = p;
    p += obj_size;
  }
}

void HeapRegion::verify_rem_set() const {
  bool failures = false;
  verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
  guarantee(!failures, "HeapRegion RemSet verification failed");
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  // Not used for G1 anymore, but pure virtual in Space.
  ShouldNotReachHere();
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.
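// The functions below maintain the space's top pointer and its slice of
// the block offset table: clear() resets top and the BOT, while
// cross_threshold() records a newly allocated block [start, end) in the
// BOT and returns the next address at which the allocation path must call
// back so the table stays up to date.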
void G1ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1ContiguousSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1ContiguousSpace::print() const {
  print_short();
  tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end()));
}

HeapWord* G1ContiguousSpace::initialize_threshold() {
  return _bot_part.initialize_threshold();
}

HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start,
                                             HeapWord* end) {
  _bot_part.alloc_block(start, end);
  return _bot_part.threshold();
}

void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1ContiguousSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
  _bot_part(bot, this),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
}

void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}