/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS

CardTableRS::CardTableRS(MemRegion whole_heap,
                         int max_covered_regions) :
  GenRemSet(),
  _cur_youngergen_card_val(youngergenP1_card),
  _regions_to_iterate(max_covered_regions - 1)
{
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap,
                                                max_covered_regions);
  } else {
    _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
  }
#else
  _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
#endif
  set_bs(_ct_bs);
  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
                         mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  }
  for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) {
    _last_cur_val_in_gen[i] = clean_card_val();
  }
  _ct_bs->set_CTRS(this);
}

CardTableRS::~CardTableRS() {
  if (_ct_bs) {
    delete _ct_bs;
    _ct_bs = NULL;
  }
  if (_last_cur_val_in_gen) {
    // Free with the same memory type (mtGC) used for the allocation above.
    FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen, mtGC);
  }
}

void CardTableRS::resize_covered_region(MemRegion new_region) {
  _ct_bs->resize_covered_region(new_region);
}

jbyte CardTableRS::find_unused_youngergenP_card_value() {
  for (jbyte v = youngergenP1_card;
       v < cur_youngergen_and_prev_nonclean_card;
       v++) {
    bool seen = false;
    for (int g = 0; g < _regions_to_iterate; g++) {
      if (_last_cur_val_in_gen[g] == v) {
        seen = true;
        break;
      }
    }
    if (!seen) return v;
  }
  ShouldNotReachHere();
  return 0;
}

void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  // Parallel or sequential, we must always set the prev to equal the
  // last one written.
  if (parallel) {
    // Find a parallel value to be used next.
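    // Note: this rotates to a youngergenP value that no generation's
    // table currently records in _last_cur_val_in_gen, so that entries
    // written under the previous parallel scan remain recognizable as
    // "prev younger-gen" values rather than being mistaken for "cur".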
    jbyte next_val = find_unused_youngergenP_card_value();
    set_cur_youngergen_card_val(next_val);

  } else {
    // In a sequential traversal we will always write youngergen, so that
    // the inline barrier is correct.
    set_cur_youngergen_card_val(youngergen_card);
  }
}

void CardTableRS::younger_refs_iterate(Generation* g,
                                       OopsInGenClosure* blk) {
  _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
  g->younger_refs_iterate(blk);
}

inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
  if (_is_par) {
    return clear_card_parallel(entry);
  } else {
    return clear_card_serial(entry);
  }
}

inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
  while (true) {
    // In the parallel case, we may have to do this several times.
    jbyte entry_val = *entry;
    assert(entry_val != CardTableRS::clean_card_val(),
           "We shouldn't be looking at clean cards, and this should "
           "be the only place they get cleaned.");
    if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
        || _ct->is_prev_youngergen_card_val(entry_val)) {
      jbyte res =
        Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
      if (res == entry_val) {
        break;
      } else {
        assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
               "The CAS above should only fail if another thread did "
               "a GC write barrier.");
      }
    } else if (entry_val ==
               CardTableRS::cur_youngergen_and_prev_nonclean_card) {
      // Parallelism shouldn't matter in this case.  Only the thread
      // assigned to scan the card should change this value.
      *entry = _ct->cur_youngergen_card_val();
      break;
    } else {
      assert(entry_val == _ct->cur_youngergen_card_val(),
             "Should be the only possibility.");
      // In this case, the card was clean before, and became
      // cur_youngergen only because of processing of a promoted object.
      // We don't have to look at the card.
      return false;
    }
  }
  return true;
}


inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
  jbyte entry_val = *entry;
  assert(entry_val != CardTableRS::clean_card_val(),
         "We shouldn't be looking at clean cards, and this should "
         "be the only place they get cleaned.");
  assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
         "This should not be possible in the sequential case.");
  *entry = CardTableRS::clean_card_val();
  return true;
}

ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
    _dirty_card_closure(dirty_card_closure), _ct(ct) {
  // Cannot yet substitute active_workers for n_par_threads
  // in the case where parallelism is being turned off by
  // setting n_par_threads to 0.
  _is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!_is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
}

bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
  return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
}

void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  assert(mr.word_size() > 0, "Error");
  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  // mr.end() may not necessarily be card aligned.
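  // We walk the card entries from the one covering mr.last() down to the
  // one covering mr.start(), growing a contiguous non-clean window
  // [start_of_non_clean, end_of_non_clean) leftward, and handing each
  // maximal window to the dirty-card closure once a clean card ends it.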
  jbyte* cur_entry = _ct->byte_for(mr.last());
  const jbyte* limit = _ct->byte_for(mr.start());
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  while (cur_entry >= limit) {
    HeapWord* cur_hw = _ct->addr_for(cur_entry);
    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
      // Continue the dirty range by opening the
      // dirty window one card to the left.
      start_of_non_clean = cur_hw;
    } else {
      // We hit a "clean" card; process any non-empty
      // "dirty" range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mrd);
      }

      // Fast forward through a contiguous run of clean cards that
      // begins at a word boundary, comparing a whole word of card
      // entries at a time.
      if (is_word_aligned(cur_entry)) {
        jbyte* cur_row = cur_entry - BytesPerWord;
        while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
          cur_row -= BytesPerWord;
        }
        cur_entry = cur_row + BytesPerWord;
        cur_hw = _ct->addr_for(cur_entry);
      }

      // Reset the dirty window, while continuing to look
      // for the next dirty card that will start a
      // new dirty window.
      end_of_non_clean = cur_hw;
      start_of_non_clean = cur_hw;
    }
    // Note that "cur_entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    cur_entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mrd);
  }
}

// clean (by dirty->clean before) ==> cur_younger_gen
// dirty ==> cur_youngergen_and_prev_nonclean_card
// precleaned ==> cur_youngergen_and_prev_nonclean_card
// prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
  jbyte* entry = ct_bs()->byte_for(field);
  do {
    jbyte entry_val = *entry;
    // We put this first because it's probably the most common case.
    if (entry_val == clean_card_val()) {
      // No threat of contention with cleaning threads.
      *entry = cur_youngergen_card_val();
      return;
    } else if (card_is_dirty_wrt_gen_iter(entry_val)
               || is_prev_youngergen_card_val(entry_val)) {
      // Mark it as both cur and prev youngergen; card cleaning thread will
      // eventually remove the previous stuff.
      jbyte new_val = cur_youngergen_and_prev_nonclean_card;
      jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
      // Did the CAS succeed?
      if (res == entry_val) return;
      // Otherwise, retry, to see the new value.
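      // (Another thread -- a cleaning thread or another mutator's
      // barrier -- changed the entry; loop around to re-read it and
      // dispatch on the new value, per the transition table above.)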
      continue;
    } else {
      assert(entry_val == cur_youngergen_and_prev_nonclean_card
             || entry_val == cur_youngergen_card_val(),
             "should be the only possibilities.");
      return;
    }
  } while (true);
}

void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  const MemRegion urasm = sp->used_region_at_save_marks();
#ifdef ASSERT
  // Convert the assertion check to a warning if we are running
  // CMS+ParNew until related bug is fixed.
  MemRegion ur = sp->used_region();
  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
         err_msg("Did you forget to call save_marks()? "
                 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())));
  // In the case of CMS+ParNew, issue a warning
  if (!ur.contains(urasm)) {
    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
    warning("CMS+ParNew: Did you forget to call save_marks()? "
            "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
            "[" PTR_FORMAT ", " PTR_FORMAT ")",
            p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
    MemRegion ur2 = sp->used_region();
    MemRegion urasm2 = sp->used_region_at_save_marks();
    if (!ur.equals(ur2)) {
      warning("CMS+ParNew: Flickering used_region()!!");
    }
    if (!urasm.equals(urasm2)) {
      warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
    }
    ShouldNotReachHere();
  }
#endif
  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
}

void CardTableRS::clear_into_younger(Generation* old_gen) {
  assert(old_gen->level() == 1, "Should only be called for the old generation");
  // The card tables for the youngest gen need never be cleared.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes.  If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  clear(old_gen->prev_used_region());
}

void CardTableRS::invalidate_or_clear(Generation* old_gen) {
  assert(old_gen->level() == 1, "Should only be called for the old generation");
  // Invalidate the cards for the currently occupied part of
  // the old generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region).  No need to do anything for the youngest
  // generation.  Also see note#20040107.ysr above.
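  // Note that invalidate() dirties its cards, so the still-occupied
  // part will be re-scanned for old-to-young pointers, while the part
  // that fell out of use since the last collection only needs its
  // cards cleared.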
  MemRegion used_mr = old_gen->used_region();
  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
  if (!to_be_cleared_mr.is_empty()) {
    clear(to_be_cleared_mr);
  }
  invalidate(used_mr);
}


class VerifyCleanCardClosure: public OopClosure {
private:
  HeapWord* _boundary;
  HeapWord* _begin;
  HeapWord* _end;
protected:
  template <class T> void do_oop_work(T* p) {
    HeapWord* jp = (HeapWord*)p;
    assert(jp >= _begin && jp < _end,
           err_msg("Error: jp " PTR_FORMAT " should be within "
                   "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
                   p2i(jp), p2i(_begin), p2i(_end)));
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
              err_msg("pointer " PTR_FORMAT " at " PTR_FORMAT " on "
                      "clean card crosses boundary " PTR_FORMAT,
                      p2i((HeapWord*)obj), p2i(jp), p2i(_boundary)));
  }

public:
  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
    _boundary(b), _begin(begin), _end(end) {
    assert(b <= begin,
           err_msg("Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT,
                   p2i(b), p2i(begin)));
    assert(begin <= end,
           err_msg("Error: begin " PTR_FORMAT " should be at or below end " PTR_FORMAT,
                   p2i(begin), p2i(end)));
  }

  virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};

class VerifyCTSpaceClosure: public SpaceClosure {
private:
  CardTableRS* _ct;
  HeapWord* _boundary;
public:
  VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
    _ct(ct), _boundary(boundary) {}
  virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};

class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
  CardTableRS* _ct;
public:
  VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
  void do_generation(Generation* gen) {
    // Skip the youngest generation.
    if (gen->level() == 0) return;
    // Normally, we're interested in pointers to younger generations.
    VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
    gen->space_iterate(&blk, true);
  }
};

void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has an
      // old-to-young field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
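      // A block that starts before 'boundary' straddles the card
      // boundary.  For a regular (non-array) object, a younger ref it
      // holds would have dirtied the card containing the object's
      // start, i.e. the previous card; so if that card is non-clean we
      // skip the straddling object below, and otherwise we still verify
      // the object's fields that fall within [begin, end).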
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      if (begin < end) {
        MemRegion mr(begin, end);
        VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
        for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) {
          if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
            oop(cur)->oop_iterate_no_header(&verify_blk, mr);
          }
        }
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //           "Illegal CT value");
      // That, however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      // guarantee(cur_youngergen_card_val() == youngergen_card
      //           || !is_prev_youngergen_card_val(*cur_entry),
      //           "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the used
      //     space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result; it is also unlikely to be a significant
      // performance inhibitor in practice. For instance,
      // some recent measurements, with unoccupied cards eagerly cleared
      // out to maintain this invariant, showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "younger gen" value and a
      // "derivative" case to consider, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave,
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3.
      //   We here consider the "derivative" case from Case 1b above,
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and becomes occupied as a
      //     result of promotions during this young GC. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the cur_younger_gen_and_prev_non_clean value,
      //     which returns us to the situation dealt with here in Case 3;
      //     a subsequent collection resolves it as above. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNoncleanCardWrapper and in CardTableModRefBS for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly because of Case 3b one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}

void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();
  }
}


void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
  if (!mr.is_empty()) {
    jbyte* cur_entry = byte_for(mr.start());
    jbyte* limit = byte_after(mr.last());
    // The region mr may not start on a card boundary so
    // the first card may reflect a write to the space
    // just prior to mr.
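    // (For example, with 512-byte cards, a store just below a
    // non-card-aligned mr.start() dirties the very card that also
    // covers mr.start().)  Exempt that shared first card from the
    // cleanliness check below.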
    if (!is_aligned(mr.start())) {
      cur_entry++;
    }
    for (; cur_entry < limit; cur_entry++) {
      guarantee(*cur_entry == CardTableModRefBS::clean_card,
                "Unexpected dirty card found");
    }
  }
}