/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

class HasAccumulatedModifiedOopsClosure : public KlassClosure {
  bool _found;
 public:
  HasAccumulatedModifiedOopsClosure() : _found(false) {}
  void do_klass(Klass* klass) {
    if (_found) {
      return;
    }

    if (klass->has_accumulated_modified_oops()) {
      _found = true;
    }
  }
  bool found() {
    return _found;
  }
};

bool KlassRemSet::mod_union_is_clear() {
  HasAccumulatedModifiedOopsClosure closure;
  ClassLoaderDataGraph::classes_do(&closure);

  return !closure.found();
}


class ClearKlassModUnionClosure : public KlassClosure {
 public:
  void do_klass(Klass* klass) {
    if (klass->has_accumulated_modified_oops()) {
      klass->clear_accumulated_modified_oops();
    }
  }
};

void KlassRemSet::clear_mod_union() {
  ClearKlassModUnionClosure closure;
  ClassLoaderDataGraph::classes_do(&closure);
}

CardTableRS::CardTableRS(MemRegion whole_heap) :
  _bs(NULL),
  _cur_youngergen_card_val(youngergenP1_card)
{
  _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
  _ct_bs->initialize();
  set_bs(_ct_bs);
  // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
  // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
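  // Note: the array below is sized max_gens + 1 rather than max_gens;
  // younger_refs_iterate() further down records the current value at
  // index 2 (== max_gens), and find_unused_youngergenP_card_value()
  // scans _regions_to_iterate slots when choosing a fresh parallel value.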
  uint max_gens = 2;
  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  }
  for (uint i = 0; i < max_gens + 1; i++) {
    _last_cur_val_in_gen[i] = clean_card_val();
  }
  _ct_bs->set_CTRS(this);
}

CardTableRS::~CardTableRS() {
  if (_ct_bs) {
    delete _ct_bs;
    _ct_bs = NULL;
  }
  if (_last_cur_val_in_gen) {
    FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
  }
}

void CardTableRS::resize_covered_region(MemRegion new_region) {
  _ct_bs->resize_covered_region(new_region);
}

jbyte CardTableRS::find_unused_youngergenP_card_value() {
  for (jbyte v = youngergenP1_card;
       v < cur_youngergen_and_prev_nonclean_card;
       v++) {
    bool seen = false;
    for (int g = 0; g < _regions_to_iterate; g++) {
      if (_last_cur_val_in_gen[g] == v) {
        seen = true;
        break;
      }
    }
    if (!seen) {
      return v;
    }
  }
  ShouldNotReachHere();
  return 0;
}

void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  // Parallel or sequential, we must always set the prev to equal the
  // last one written.
  if (parallel) {
    // Find a parallel value to be used next.
    jbyte next_val = find_unused_youngergenP_card_value();
    set_cur_youngergen_card_val(next_val);

  } else {
    // In a sequential traversal we will always write youngergen, so that
    // the inline barrier is correct.
    set_cur_youngergen_card_val(youngergen_card);
  }
}

void CardTableRS::younger_refs_iterate(Generation* g,
                                       OopsInGenClosure* blk,
                                       uint n_threads) {
  // The indexing in this array is slightly odd. We want to access
  // the old generation record here, which is at index 2.
  _last_cur_val_in_gen[2] = cur_youngergen_card_val();
  g->younger_refs_iterate(blk, n_threads);
}

inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
  if (_is_par) {
    return clear_card_parallel(entry);
  } else {
    return clear_card_serial(entry);
  }
}

inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
  while (true) {
    // In the parallel case, we may have to do this several times.
    jbyte entry_val = *entry;
    assert(entry_val != CardTableRS::clean_card_val(),
           "We shouldn't be looking at clean cards, and this should "
           "be the only place they get cleaned.");
    if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
        || _ct->is_prev_youngergen_card_val(entry_val)) {
      jbyte res =
        Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
      if (res == entry_val) {
        break;
      } else {
        assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
               "The CAS above should only fail if another thread did "
               "a GC write barrier.");
      }
    } else if (entry_val ==
               CardTableRS::cur_youngergen_and_prev_nonclean_card) {
      // Parallelism shouldn't matter in this case. Only the thread
      // assigned to scan the card should change this value.
      *entry = _ct->cur_youngergen_card_val();
      break;
    } else {
      assert(entry_val == _ct->cur_youngergen_card_val(),
             "Should be the only possibility.");
      // In this case, the card was clean before, and became
      // cur_youngergen only because of processing of a promoted object.
      // We don't have to look at the card.
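      // Returning false tells ClearNoncleanCardWrapper::do_MemRegion()
      // below to treat this card like a clean one, i.e. to close any
      // currently open dirty window rather than extend it over this card.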
      return false;
    }
  }
  return true;
}


inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
  jbyte entry_val = *entry;
  assert(entry_val != CardTableRS::clean_card_val(),
         "We shouldn't be looking at clean cards, and this should "
         "be the only place they get cleaned.");
  assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
         "This should be impossible in the sequential case.");
  *entry = CardTableRS::clean_card_val();
  return true;
}

ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
    _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
}

bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
  return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
}

// The regions are visited in *decreasing* address order.
// This order aids with imprecise card marking, where a dirty
// card may cause scanning, and summarization marking, of objects
// that extend onto subsequent cards.
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  assert(mr.word_size() > 0, "Error");
  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  // mr.end() may not necessarily be card aligned.
  jbyte* cur_entry = _ct->byte_for(mr.last());
  const jbyte* limit = _ct->byte_for(mr.start());
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  while (cur_entry >= limit) {
    HeapWord* cur_hw = _ct->addr_for(cur_entry);
    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
      // Continue the dirty range by opening the
      // dirty window one card to the left.
      start_of_non_clean = cur_hw;
    } else {
      // We hit a "clean" card; process any non-empty
      // "dirty" range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mrd);
      }

      // Fast forward through a potential run of whole words of clean
      // cards beginning at a word boundary.
      if (is_word_aligned(cur_entry)) {
        jbyte* cur_row = cur_entry - BytesPerWord;
        while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
          cur_row -= BytesPerWord;
        }
        cur_entry = cur_row + BytesPerWord;
        cur_hw = _ct->addr_for(cur_entry);
      }

      // Reset the dirty window, while continuing to look
      // for the next dirty card that will start a
      // new dirty window.
      end_of_non_clean = cur_hw;
      start_of_non_clean = cur_hw;
    }
    // Note that "cur_entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    cur_entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
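  // (Small illustration: scanning a four-card region whose table bytes
  //  read [D D C D] right-to-left, the window first opens over the
  //  rightmost D, is flushed when the C is hit, re-opens over the two
  //  leading Ds, and that final window is flushed here.)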
  if (start_of_non_clean < end_of_non_clean) {
    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mrd);
  }
}

// clean (by dirty->clean before) ==> cur_younger_gen
// dirty ==> cur_youngergen_and_prev_nonclean_card
// precleaned ==> cur_youngergen_and_prev_nonclean_card
// prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
  jbyte* entry = _ct_bs->byte_for(field);
  do {
    jbyte entry_val = *entry;
    // We put this first because it's probably the most common case.
    if (entry_val == clean_card_val()) {
      // No threat of contention with cleaning threads.
      *entry = cur_youngergen_card_val();
      return;
    } else if (card_is_dirty_wrt_gen_iter(entry_val)
               || is_prev_youngergen_card_val(entry_val)) {
      // Mark it as both cur and prev youngergen; card cleaning thread will
      // eventually remove the previous stuff.
      jbyte new_val = cur_youngergen_and_prev_nonclean_card;
      jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
      // Did the CAS succeed?
      if (res == entry_val) return;
      // Otherwise, retry, to see the new value.
      continue;
    } else {
      assert(entry_val == cur_youngergen_and_prev_nonclean_card
             || entry_val == cur_youngergen_card_val(),
             "should be only possibilities.");
      return;
    }
  } while (true);
}

void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl,
                                                uint n_threads) {
  const MemRegion urasm = sp->used_region_at_save_marks();
#ifdef ASSERT
  // Convert the assertion check to a warning if we are running
  // CMS+ParNew until related bug is fixed.
  MemRegion ur = sp->used_region();
  assert(ur.contains(urasm) || (UseConcMarkSweepGC),
         "Did you forget to call save_marks()? "
         "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
         "[" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
  // In the case of CMS+ParNew, issue a warning
  if (!ur.contains(urasm)) {
    assert(UseConcMarkSweepGC, "Tautology: see assert above");
    log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
                    "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                    "[" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
    MemRegion ur2 = sp->used_region();
    MemRegion urasm2 = sp->used_region_at_save_marks();
    if (!ur.equals(ur2)) {
      log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
    }
    if (!urasm.equals(urasm2)) {
      log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
    }
    ShouldNotReachHere();
  }
#endif
  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
}

void CardTableRS::clear_into_younger(Generation* old_gen) {
  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
         "Should only be called for the old generation");
  // The card tables for the youngest gen need never be cleared.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  clear(old_gen->prev_used_region());
}

void CardTableRS::invalidate_or_clear(Generation* old_gen) {
  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
         "Should only be called for the old generation");
  // Invalidate the cards for the currently occupied part of
  // the old generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region). No need to do anything for the youngest
  // generation. Also see note#20040107.ysr above.
  MemRegion used_mr = old_gen->used_region();
  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
  if (!to_be_cleared_mr.is_empty()) {
    clear(to_be_cleared_mr);
  }
  invalidate(used_mr);
}


class VerifyCleanCardClosure: public OopClosure {
 private:
  HeapWord* _boundary;
  HeapWord* _begin;
  HeapWord* _end;
 protected:
  template <class T> void do_oop_work(T* p) {
    HeapWord* jp = (HeapWord*)p;
    assert(jp >= _begin && jp < _end,
           "Error: jp " PTR_FORMAT " should be within "
           "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(jp), p2i(_begin), p2i(_end));
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
              "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
              "clean card crosses boundary " PTR_FORMAT,
              p2i(obj), p2i(jp), p2i(_boundary));
  }

 public:
  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
    _boundary(b), _begin(begin), _end(end) {
    assert(b <= begin,
           "Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT,
           p2i(b), p2i(begin));
    assert(begin <= end,
           "Error: begin " PTR_FORMAT " should be at or below end " PTR_FORMAT,
           p2i(begin), p2i(end));
  }

  virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};

class VerifyCTSpaceClosure: public SpaceClosure {
 private:
  CardTableRS* _ct;
  HeapWord* _boundary;
 public:
  VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
    _ct(ct), _boundary(boundary) {}
  virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};

class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
  CardTableRS* _ct;
 public:
  VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
  void do_generation(Generation* gen) {
    // Skip the youngest generation.
    if (GenCollectedHeap::heap()->is_young_gen(gen)) {
      return;
    }
    // Normally, we're interested in pointers to younger generations.
    VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
    gen->space_iterate(&blk, true);
  }
};

void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == clean_card_val()) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == clean_card_val()) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
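      // So, before walking this clean range, check whether an object
      // straddles its start: if such an object begins on an earlier,
      // non-clean card, skip past it (the boundary_block handling below)
      // so that its interior is not verified as if it were on clean cards.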
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != clean_card_val()) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      if (begin < end) {
        MemRegion mr(begin, end);
        VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
        for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) {
          if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
            oop(cur)->oop_iterate_no_header(&verify_blk, mr);
          }
        }
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      //   guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //             "Illegal CT value");
      // That, however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      //   guarantee(cur_youngergen_card_val() == youngergen_card
      //             || !is_prev_youngergen_card_val(*cur_entry),
      //             "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the used
      //     space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. Moreover, it's likely not to be a significant
      // performance inhibitor in practice. For instance,
      // some recent measurements with unoccupied cards eagerly cleared
      // out to maintain this invariant showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider, where we have a stale "young gen" value, and a
      // "derivative" case, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave,
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since, as we argued above, such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b above,
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young GC. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the cur_younger_gen_and_prev_non_clean value,
      //     which is the stale value we set out to handle in Case 3.
      //     [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNoncleanCardWrapper and in CardTableModRefBS for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly, because of Case 3b, one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}

void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  GenCollectedHeap::heap()->generation_iterate(&blk, false);
  _ct_bs->verify();
}