1 /* 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "memory/allocation.inline.hpp" 27 #include "memory/cardTableRS.hpp" 28 #include "memory/genCollectedHeap.hpp" 29 #include "memory/generation.hpp" 30 #include "memory/space.hpp" 31 #include "oops/oop.inline.hpp" 32 #include "runtime/java.hpp" 33 #include "runtime/os.hpp" 34 #ifndef SERIALGC 35 #include "gc_implementation/g1/concurrentMark.hpp" 36 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 37 #endif 38 39 CardTableRS::CardTableRS(MemRegion whole_heap, 40 int max_covered_regions) : 41 GenRemSet(), 42 _cur_youngergen_card_val(youngergenP1_card), 43 _regions_to_iterate(max_covered_regions - 1) 44 { 45 #ifndef SERIALGC 46 if (UseG1GC) { 47 _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap, 48 max_covered_regions); 49 } else { 50 _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions); 51 } 52 #else 53 _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions); 54 #endif 55 set_bs(_ct_bs); 56 _last_cur_val_in_gen = new jbyte[GenCollectedHeap::max_gens + 1]; 57 if (_last_cur_val_in_gen == NULL) { 58 vm_exit_during_initialization("Could not last_cur_val_in_gen array."); 59 } 60 for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) { 61 _last_cur_val_in_gen[i] = clean_card_val(); 62 } 63 _ct_bs->set_CTRS(this); 64 } 65 66 void CardTableRS::resize_covered_region(MemRegion new_region) { 67 _ct_bs->resize_covered_region(new_region); 68 } 69 70 jbyte CardTableRS::find_unused_youngergenP_card_value() { 71 for (jbyte v = youngergenP1_card; 72 v < cur_youngergen_and_prev_nonclean_card; 73 v++) { 74 bool seen = false; 75 for (int g = 0; g < _regions_to_iterate; g++) { 76 if (_last_cur_val_in_gen[g] == v) { 77 seen = true; 78 break; 79 } 80 } 81 if (!seen) return v; 82 } 83 ShouldNotReachHere(); 84 return 0; 85 } 86 87 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) { 88 // Parallel or sequential, we must always set the prev to equal the 89 // last one written. 90 if (parallel) { 91 // Find a parallel value to be used next. 92 jbyte next_val = find_unused_youngergenP_card_value(); 93 set_cur_youngergen_card_val(next_val); 94 95 } else { 96 // In an sequential traversal we will always write youngergen, so that 97 // the inline barrier is correct. 98 set_cur_youngergen_card_val(youngergen_card); 99 } 100 } 101 102 void CardTableRS::younger_refs_iterate(Generation* g, 103 OopsInGenClosure* blk) { 104 _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val(); 105 g->younger_refs_iterate(blk); 106 } 107 108 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) { 109 if (_is_par) { 110 return clear_card_parallel(entry); 111 } else { 112 return clear_card_serial(entry); 113 } 114 } 115 116 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) { 117 while (true) { 118 // In the parallel case, we may have to do this several times. 119 jbyte entry_val = *entry; 120 assert(entry_val != CardTableRS::clean_card_val(), 121 "We shouldn't be looking at clean cards, and this should " 122 "be the only place they get cleaned."); 123 if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val) 124 || _ct->is_prev_youngergen_card_val(entry_val)) { 125 jbyte res = 126 Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val); 127 if (res == entry_val) { 128 break; 129 } else { 130 assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card, 131 "The CAS above should only fail if another thread did " 132 "a GC write barrier."); 133 } 134 } else if (entry_val == 135 CardTableRS::cur_youngergen_and_prev_nonclean_card) { 136 // Parallelism shouldn't matter in this case. Only the thread 137 // assigned to scan the card should change this value. 138 *entry = _ct->cur_youngergen_card_val(); 139 break; 140 } else { 141 assert(entry_val == _ct->cur_youngergen_card_val(), 142 "Should be the only possibility."); 143 // In this case, the card was clean before, and become 144 // cur_youngergen only because of processing of a promoted object. 145 // We don't have to look at the card. 146 return false; 147 } 148 } 149 return true; 150 } 151 152 153 inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) { 154 jbyte entry_val = *entry; 155 assert(entry_val != CardTableRS::clean_card_val(), 156 "We shouldn't be looking at clean cards, and this should " 157 "be the only place they get cleaned."); 158 assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card, 159 "This should be possible in the sequential case."); 160 *entry = CardTableRS::clean_card_val(); 161 return true; 162 } 163 164 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper( 165 DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) : 166 _dirty_card_closure(dirty_card_closure), _ct(ct) { 167 // Cannot yet substitute active_workers for n_par_threads 168 // in the case where parallelism is being turned off by 169 // setting n_par_threads to 0. 170 _is_par = (SharedHeap::heap()->n_par_threads() > 0); 171 assert(!_is_par || 172 (SharedHeap::heap()->n_par_threads() == 173 SharedHeap::heap()->workers()->active_workers()), "Mismatch"); 174 } 175 176 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) { 177 assert(mr.word_size() > 0, "Error"); 178 assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned"); 179 // mr.end() may not necessarily be card aligned. 180 jbyte* cur_entry = _ct->byte_for(mr.last()); 181 const jbyte* limit = _ct->byte_for(mr.start()); 182 HeapWord* end_of_non_clean = mr.end(); 183 HeapWord* start_of_non_clean = end_of_non_clean; 184 while (cur_entry >= limit) { 185 HeapWord* cur_hw = _ct->addr_for(cur_entry); 186 if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) { 187 // Continue the dirty range by opening the 188 // dirty window one card to the left. 189 start_of_non_clean = cur_hw; 190 } else { 191 // We hit a "clean" card; process any non-empty 192 // "dirty" range accumulated so far. 193 if (start_of_non_clean < end_of_non_clean) { 194 const MemRegion mrd(start_of_non_clean, end_of_non_clean); 195 _dirty_card_closure->do_MemRegion(mrd); 196 } 197 // Reset the dirty window, while continuing to look 198 // for the next dirty card that will start a 199 // new dirty window. 200 end_of_non_clean = cur_hw; 201 start_of_non_clean = cur_hw; 202 } 203 // Note that "cur_entry" leads "start_of_non_clean" in 204 // its leftward excursion after this point 205 // in the loop and, when we hit the left end of "mr", 206 // will point off of the left end of the card-table 207 // for "mr". 208 cur_entry--; 209 } 210 // If the first card of "mr" was dirty, we will have 211 // been left with a dirty window, co-initial with "mr", 212 // which we now process. 213 if (start_of_non_clean < end_of_non_clean) { 214 const MemRegion mrd(start_of_non_clean, end_of_non_clean); 215 _dirty_card_closure->do_MemRegion(mrd); 216 } 217 } 218 219 // clean (by dirty->clean before) ==> cur_younger_gen 220 // dirty ==> cur_youngergen_and_prev_nonclean_card 221 // precleaned ==> cur_youngergen_and_prev_nonclean_card 222 // prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card 223 // cur-younger-gen ==> cur_younger_gen 224 // cur_youngergen_and_prev_nonclean_card ==> no change. 225 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) { 226 jbyte* entry = ct_bs()->byte_for(field); 227 do { 228 jbyte entry_val = *entry; 229 // We put this first because it's probably the most common case. 230 if (entry_val == clean_card_val()) { 231 // No threat of contention with cleaning threads. 232 *entry = cur_youngergen_card_val(); 233 return; 234 } else if (card_is_dirty_wrt_gen_iter(entry_val) 235 || is_prev_youngergen_card_val(entry_val)) { 236 // Mark it as both cur and prev youngergen; card cleaning thread will 237 // eventually remove the previous stuff. 238 jbyte new_val = cur_youngergen_and_prev_nonclean_card; 239 jbyte res = Atomic::cmpxchg(new_val, entry, entry_val); 240 // Did the CAS succeed? 241 if (res == entry_val) return; 242 // Otherwise, retry, to see the new value. 243 continue; 244 } else { 245 assert(entry_val == cur_youngergen_and_prev_nonclean_card 246 || entry_val == cur_youngergen_card_val(), 247 "should be only possibilities."); 248 return; 249 } 250 } while (true); 251 } 252 253 void CardTableRS::younger_refs_in_space_iterate(Space* sp, 254 OopsInGenClosure* cl) { 255 const MemRegion urasm = sp->used_region_at_save_marks(); 256 #ifdef ASSERT 257 // Convert the assertion check to a warning if we are running 258 // CMS+ParNew until related bug is fixed. 259 MemRegion ur = sp->used_region(); 260 assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC), 261 err_msg("Did you forget to call save_marks()? " 262 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " 263 "[" PTR_FORMAT ", " PTR_FORMAT ")", 264 urasm.start(), urasm.end(), ur.start(), ur.end())); 265 // In the case of CMS+ParNew, issue a warning 266 if (!ur.contains(urasm)) { 267 assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above"); 268 warning("CMS+ParNew: Did you forget to call save_marks()? " 269 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " 270 "[" PTR_FORMAT ", " PTR_FORMAT ")", 271 urasm.start(), urasm.end(), ur.start(), ur.end()); 272 MemRegion ur2 = sp->used_region(); 273 MemRegion urasm2 = sp->used_region_at_save_marks(); 274 if (!ur.equals(ur2)) { 275 warning("CMS+ParNew: Flickering used_region()!!"); 276 } 277 if (!urasm.equals(urasm2)) { 278 warning("CMS+ParNew: Flickering used_region_at_save_marks()!!"); 279 } 280 ShouldNotReachHere(); 281 } 282 #endif 283 _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this); 284 } 285 286 void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) { 287 GenCollectedHeap* gch = GenCollectedHeap::heap(); 288 // Generations younger than gen have been evacuated. We can clear 289 // card table entries for gen (we know that it has no pointers 290 // to younger gens) and for those below. The card tables for 291 // the youngest gen need never be cleared, and those for perm gen 292 // will be cleared based on the parameter clear_perm. 293 // There's a bit of subtlety in the clear() and invalidate() 294 // methods that we exploit here and in invalidate_or_clear() 295 // below to avoid missing cards at the fringes. If clear() or 296 // invalidate() are changed in the future, this code should 297 // be revisited. 20040107.ysr 298 Generation* g = gen; 299 for(Generation* prev_gen = gch->prev_gen(g); 300 prev_gen != NULL; 301 g = prev_gen, prev_gen = gch->prev_gen(g)) { 302 MemRegion to_be_cleared_mr = g->prev_used_region(); 303 clear(to_be_cleared_mr); 304 } 305 // Clear perm gen cards if asked to do so. 306 if (clear_perm) { 307 MemRegion to_be_cleared_mr = gch->perm_gen()->prev_used_region(); 308 clear(to_be_cleared_mr); 309 } 310 } 311 312 void CardTableRS::invalidate_or_clear(Generation* gen, bool younger, 313 bool perm) { 314 GenCollectedHeap* gch = GenCollectedHeap::heap(); 315 // For each generation gen (and younger and/or perm) 316 // invalidate the cards for the currently occupied part 317 // of that generation and clear the cards for the 318 // unoccupied part of the generation (if any, making use 319 // of that generation's prev_used_region to determine that 320 // region). No need to do anything for the youngest 321 // generation. Also see note#20040107.ysr above. 322 Generation* g = gen; 323 for(Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL; 324 g = prev_gen, prev_gen = gch->prev_gen(g)) { 325 MemRegion used_mr = g->used_region(); 326 MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr); 327 if (!to_be_cleared_mr.is_empty()) { 328 clear(to_be_cleared_mr); 329 } 330 invalidate(used_mr); 331 if (!younger) break; 332 } 333 // Clear perm gen cards if asked to do so. 334 if (perm) { 335 g = gch->perm_gen(); 336 MemRegion used_mr = g->used_region(); 337 MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr); 338 if (!to_be_cleared_mr.is_empty()) { 339 clear(to_be_cleared_mr); 340 } 341 invalidate(used_mr); 342 } 343 } 344 345 346 class VerifyCleanCardClosure: public OopClosure { 347 private: 348 HeapWord* _boundary; 349 HeapWord* _begin; 350 HeapWord* _end; 351 protected: 352 template <class T> void do_oop_work(T* p) { 353 HeapWord* jp = (HeapWord*)p; 354 assert(jp >= _begin && jp < _end, 355 err_msg("Error: jp " PTR_FORMAT " should be within " 356 "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")", 357 _begin, _end)); 358 oop obj = oopDesc::load_decode_heap_oop(p); 359 guarantee(obj == NULL || (HeapWord*)obj >= _boundary, 360 err_msg("pointer " PTR_FORMAT " at " PTR_FORMAT " on " 361 "clean card crosses boundary" PTR_FORMAT, 362 (HeapWord*)obj, jp, _boundary)); 363 } 364 365 public: 366 VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) : 367 _boundary(b), _begin(begin), _end(end) { 368 assert(b <= begin, 369 err_msg("Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT, 370 b, begin)); 371 assert(begin <= end, 372 err_msg("Error: begin " PTR_FORMAT " should be strictly below end " PTR_FORMAT, 373 begin, end)); 374 } 375 376 virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); } 377 virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); } 378 }; 379 380 class VerifyCTSpaceClosure: public SpaceClosure { 381 private: 382 CardTableRS* _ct; 383 HeapWord* _boundary; 384 public: 385 VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) : 386 _ct(ct), _boundary(boundary) {} 387 virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); } 388 }; 389 390 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure { 391 CardTableRS* _ct; 392 public: 393 VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {} 394 void do_generation(Generation* gen) { 395 // Skip the youngest generation. 396 if (gen->level() == 0) return; 397 // Normally, we're interested in pointers to younger generations. 398 VerifyCTSpaceClosure blk(_ct, gen->reserved().start()); 399 gen->space_iterate(&blk, true); 400 } 401 }; 402 403 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) { 404 // We don't need to do young-gen spaces. 405 if (s->end() <= gen_boundary) return; 406 MemRegion used = s->used_region(); 407 408 jbyte* cur_entry = byte_for(used.start()); 409 jbyte* limit = byte_after(used.last()); 410 while (cur_entry < limit) { 411 if (*cur_entry == CardTableModRefBS::clean_card) { 412 jbyte* first_dirty = cur_entry+1; 413 while (first_dirty < limit && 414 *first_dirty == CardTableModRefBS::clean_card) { 415 first_dirty++; 416 } 417 // If the first object is a regular object, and it has a 418 // young-to-old field, that would mark the previous card. 419 HeapWord* boundary = addr_for(cur_entry); 420 HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty); 421 HeapWord* boundary_block = s->block_start(boundary); 422 HeapWord* begin = boundary; // Until proven otherwise. 423 HeapWord* start_block = boundary_block; // Until proven otherwise. 424 if (boundary_block < boundary) { 425 if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) { 426 oop boundary_obj = oop(boundary_block); 427 if (!boundary_obj->is_objArray() && 428 !boundary_obj->is_typeArray()) { 429 guarantee(cur_entry > byte_for(used.start()), 430 "else boundary would be boundary_block"); 431 if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) { 432 begin = boundary_block + s->block_size(boundary_block); 433 start_block = begin; 434 } 435 } 436 } 437 } 438 // Now traverse objects until end. 439 if (begin < end) { 440 MemRegion mr(begin, end); 441 VerifyCleanCardClosure verify_blk(gen_boundary, begin, end); 442 for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) { 443 if (s->block_is_obj(cur) && s->obj_is_alive(cur)) { 444 oop(cur)->oop_iterate(&verify_blk, mr); 445 } 446 } 447 } 448 cur_entry = first_dirty; 449 } else { 450 // We'd normally expect that cur_youngergen_and_prev_nonclean_card 451 // is a transient value, that cannot be in the card table 452 // except during GC, and thus assert that: 453 // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card, 454 // "Illegal CT value"); 455 // That however, need not hold, as will become clear in the 456 // following... 457 458 // We'd normally expect that if we are in the parallel case, 459 // we can't have left a prev value (which would be different 460 // from the current value) in the card table, and so we'd like to 461 // assert that: 462 // guarantee(cur_youngergen_card_val() == youngergen_card 463 // || !is_prev_youngergen_card_val(*cur_entry), 464 // "Illegal CT value"); 465 // That, however, may not hold occasionally, because of 466 // CMS or MSC in the old gen. To wit, consider the 467 // following two simple illustrative scenarios: 468 // (a) CMS: Consider the case where a large object L 469 // spanning several cards is allocated in the old 470 // gen, and has a young gen reference stored in it, dirtying 471 // some interior cards. A young collection scans the card, 472 // finds a young ref and installs a youngergenP_n value. 473 // L then goes dead. Now a CMS collection starts, 474 // finds L dead and sweeps it up. Assume that L is 475 // abutting _unallocated_blk, so _unallocated_blk is 476 // adjusted down to (below) L. Assume further that 477 // no young collection intervenes during this CMS cycle. 478 // The next young gen cycle will not get to look at this 479 // youngergenP_n card since it lies in the unoccupied 480 // part of the space. 481 // Some young collections later the blocks on this 482 // card can be re-allocated either due to direct allocation 483 // or due to absorbing promotions. At this time, the 484 // before-gc verification will fail the above assert. 485 // (b) MSC: In this case, an object L with a young reference 486 // is on a card that (therefore) holds a youngergen_n value. 487 // Suppose also that L lies towards the end of the used 488 // the used space before GC. An MSC collection 489 // occurs that compacts to such an extent that this 490 // card is no longer in the occupied part of the space. 491 // Since current code in MSC does not always clear cards 492 // in the unused part of old gen, this stale youngergen_n 493 // value is left behind and can later be covered by 494 // an object when promotion or direct allocation 495 // re-allocates that part of the heap. 496 // 497 // Fortunately, the presence of such stale card values is 498 // "only" a minor annoyance in that subsequent young collections 499 // might needlessly scan such cards, but would still never corrupt 500 // the heap as a result. However, it's likely not to be a significant 501 // performance inhibitor in practice. For instance, 502 // some recent measurements with unoccupied cards eagerly cleared 503 // out to maintain this invariant, showed next to no 504 // change in young collection times; of course one can construct 505 // degenerate examples where the cost can be significant.) 506 // Note, in particular, that if the "stale" card is modified 507 // after re-allocation, it would be dirty, not "stale". Thus, 508 // we can never have a younger ref in such a card and it is 509 // safe not to scan that card in any collection. [As we see 510 // below, we do some unnecessary scanning 511 // in some cases in the current parallel scanning algorithm.] 512 // 513 // The main point below is that the parallel card scanning code 514 // deals correctly with these stale card values. There are two main 515 // cases to consider where we have a stale "younger gen" value and a 516 // "derivative" case to consider, where we have a stale 517 // "cur_younger_gen_and_prev_non_clean" value, as will become 518 // apparent in the case analysis below. 519 // o Case 1. If the stale value corresponds to a younger_gen_n 520 // value other than the cur_younger_gen value then the code 521 // treats this as being tantamount to a prev_younger_gen 522 // card. This means that the card may be unnecessarily scanned. 523 // There are two sub-cases to consider: 524 // o Case 1a. Let us say that the card is in the occupied part 525 // of the generation at the time the collection begins. In 526 // that case the card will be either cleared when it is scanned 527 // for young pointers, or will be set to cur_younger_gen as a 528 // result of promotion. (We have elided the normal case where 529 // the scanning thread and the promoting thread interleave 530 // possibly resulting in a transient 531 // cur_younger_gen_and_prev_non_clean value before settling 532 // to cur_younger_gen. [End Case 1a.] 533 // o Case 1b. Consider now the case when the card is in the unoccupied 534 // part of the space which becomes occupied because of promotions 535 // into it during the current young GC. In this case the card 536 // will never be scanned for young references. The current 537 // code will set the card value to either 538 // cur_younger_gen_and_prev_non_clean or leave 539 // it with its stale value -- because the promotions didn't 540 // result in any younger refs on that card. Of these two 541 // cases, the latter will be covered in Case 1a during 542 // a subsequent scan. To deal with the former case, we need 543 // to further consider how we deal with a stale value of 544 // cur_younger_gen_and_prev_non_clean in our case analysis 545 // below. This we do in Case 3 below. [End Case 1b] 546 // [End Case 1] 547 // o Case 2. If the stale value corresponds to cur_younger_gen being 548 // a value not necessarily written by a current promotion, the 549 // card will not be scanned by the younger refs scanning code. 550 // (This is OK since as we argued above such cards cannot contain 551 // any younger refs.) The result is that this value will be 552 // treated as a prev_younger_gen value in a subsequent collection, 553 // which is addressed in Case 1 above. [End Case 2] 554 // o Case 3. We here consider the "derivative" case from Case 1b. above 555 // because of which we may find a stale 556 // cur_younger_gen_and_prev_non_clean card value in the table. 557 // Once again, as in Case 1, we consider two subcases, depending 558 // on whether the card lies in the occupied or unoccupied part 559 // of the space at the start of the young collection. 560 // o Case 3a. Let us say the card is in the occupied part of 561 // the old gen at the start of the young collection. In that 562 // case, the card will be scanned by the younger refs scanning 563 // code which will set it to cur_younger_gen. In a subsequent 564 // scan, the card will be considered again and get its final 565 // correct value. [End Case 3a] 566 // o Case 3b. Now consider the case where the card is in the 567 // unoccupied part of the old gen, and is occupied as a result 568 // of promotions during thus young gc. In that case, 569 // the card will not be scanned for younger refs. The presence 570 // of newly promoted objects on the card will then result in 571 // its keeping the value cur_younger_gen_and_prev_non_clean 572 // value, which we have dealt with in Case 3 here. [End Case 3b] 573 // [End Case 3] 574 // 575 // (Please refer to the code in the helper class 576 // ClearNonCleanCardWrapper and in CardTableModRefBS for details.) 577 // 578 // The informal arguments above can be tightened into a formal 579 // correctness proof and it behooves us to write up such a proof, 580 // or to use model checking to prove that there are no lingering 581 // concerns. 582 // 583 // Clearly because of Case 3b one cannot bound the time for 584 // which a card will retain what we have called a "stale" value. 585 // However, one can obtain a Loose upper bound on the redundant 586 // work as a result of such stale values. Note first that any 587 // time a stale card lies in the occupied part of the space at 588 // the start of the collection, it is scanned by younger refs 589 // code and we can define a rank function on card values that 590 // declines when this is so. Note also that when a card does not 591 // lie in the occupied part of the space at the beginning of a 592 // young collection, its rank can either decline or stay unchanged. 593 // In this case, no extra work is done in terms of redundant 594 // younger refs scanning of that card. 595 // Then, the case analysis above reveals that, in the worst case, 596 // any such stale card will be scanned unnecessarily at most twice. 597 // 598 // It is nonethelss advisable to try and get rid of some of this 599 // redundant work in a subsequent (low priority) re-design of 600 // the card-scanning code, if only to simplify the underlying 601 // state machine analysis/proof. ysr 1/28/2002. XXX 602 cur_entry++; 603 } 604 } 605 } 606 607 void CardTableRS::verify() { 608 // At present, we only know how to verify the card table RS for 609 // generational heaps. 610 VerifyCTGenClosure blk(this); 611 CollectedHeap* ch = Universe::heap(); 612 // We will do the perm-gen portion of the card table, too. 613 Generation* pg = SharedHeap::heap()->perm_gen(); 614 HeapWord* pg_boundary = pg->reserved().start(); 615 616 if (ch->kind() == CollectedHeap::GenCollectedHeap) { 617 GenCollectedHeap::heap()->generation_iterate(&blk, false); 618 _ct_bs->verify(); 619 620 // If the old gen collections also collect perm, then we are only 621 // interested in perm-to-young pointers, not perm-to-old pointers. 622 GenCollectedHeap* gch = GenCollectedHeap::heap(); 623 CollectorPolicy* cp = gch->collector_policy(); 624 if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) { 625 pg_boundary = gch->get_gen(1)->reserved().start(); 626 } 627 } 628 VerifyCTSpaceClosure perm_space_blk(this, pg_boundary); 629 SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true); 630 } 631 632 633 void CardTableRS::verify_aligned_region_empty(MemRegion mr) { 634 if (!mr.is_empty()) { 635 jbyte* cur_entry = byte_for(mr.start()); 636 jbyte* limit = byte_after(mr.last()); 637 // The region mr may not start on a card boundary so 638 // the first card may reflect a write to the space 639 // just prior to mr. 640 if (!is_aligned(mr.start())) { 641 cur_entry++; 642 } 643 for (;cur_entry < limit; cur_entry++) { 644 guarantee(*cur_entry == CardTableModRefBS::clean_card, 645 "Unexpected dirty card found"); 646 } 647 } 648 }