1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/shared/cardTableRS.hpp" 27 #include "gc/shared/genCollectedHeap.hpp" 28 #include "gc/shared/generation.hpp" 29 #include "gc/shared/space.inline.hpp" 30 #include "memory/allocation.inline.hpp" 31 #include "oops/access.inline.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "runtime/atomic.hpp" 34 #include "runtime/java.hpp" 35 #include "runtime/os.hpp" 36 #include "utilities/macros.hpp" 37 38 class HasAccumulatedModifiedOopsClosure : public CLDClosure { 39 bool _found; 40 public: 41 HasAccumulatedModifiedOopsClosure() : _found(false) {} 42 void do_cld(ClassLoaderData* cld) { 43 if (_found) { 44 return; 45 } 46 47 if (cld->has_accumulated_modified_oops()) { 48 _found = true; 49 } 50 } 51 bool found() { 52 return _found; 53 } 54 }; 55 56 bool CLDRemSet::mod_union_is_clear() { 57 HasAccumulatedModifiedOopsClosure closure; 58 ClassLoaderDataGraph::cld_do(&closure); 59 60 return !closure.found(); 61 } 62 63 64 class ClearCLDModUnionClosure : public CLDClosure { 65 public: 66 void do_cld(ClassLoaderData* cld) { 67 if (cld->has_accumulated_modified_oops()) { 68 cld->clear_accumulated_modified_oops(); 69 } 70 } 71 }; 72 73 void CLDRemSet::clear_mod_union() { 74 ClearCLDModUnionClosure closure; 75 ClassLoaderDataGraph::cld_do(&closure); 76 } 77 78 79 jbyte CardTableRS::find_unused_youngergenP_card_value() { 80 for (jbyte v = youngergenP1_card; 81 v < cur_youngergen_and_prev_nonclean_card; 82 v++) { 83 bool seen = false; 84 for (int g = 0; g < _regions_to_iterate; g++) { 85 if (_last_cur_val_in_gen[g] == v) { 86 seen = true; 87 break; 88 } 89 } 90 if (!seen) { 91 return v; 92 } 93 } 94 ShouldNotReachHere(); 95 return 0; 96 } 97 98 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) { 99 // Parallel or sequential, we must always set the prev to equal the 100 // last one written. 101 if (parallel) { 102 // Find a parallel value to be used next. 103 jbyte next_val = find_unused_youngergenP_card_value(); 104 set_cur_youngergen_card_val(next_val); 105 106 } else { 107 // In an sequential traversal we will always write youngergen, so that 108 // the inline barrier is correct. 109 set_cur_youngergen_card_val(youngergen_card); 110 } 111 } 112 113 void CardTableRS::younger_refs_iterate(Generation* g, 114 OopsInGenClosure* blk, 115 uint n_threads) { 116 // The indexing in this array is slightly odd. We want to access 117 // the old generation record here, which is at index 2. 118 _last_cur_val_in_gen[2] = cur_youngergen_card_val(); 119 g->younger_refs_iterate(blk, n_threads); 120 } 121 122 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) { 123 if (_is_par) { 124 return clear_card_parallel(entry); 125 } else { 126 return clear_card_serial(entry); 127 } 128 } 129 130 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) { 131 while (true) { 132 // In the parallel case, we may have to do this several times. 133 jbyte entry_val = *entry; 134 assert(entry_val != CardTableRS::clean_card_val(), 135 "We shouldn't be looking at clean cards, and this should " 136 "be the only place they get cleaned."); 137 if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val) 138 || _ct->is_prev_youngergen_card_val(entry_val)) { 139 jbyte res = 140 Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val); 141 if (res == entry_val) { 142 break; 143 } else { 144 assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card, 145 "The CAS above should only fail if another thread did " 146 "a GC write barrier."); 147 } 148 } else if (entry_val == 149 CardTableRS::cur_youngergen_and_prev_nonclean_card) { 150 // Parallelism shouldn't matter in this case. Only the thread 151 // assigned to scan the card should change this value. 152 *entry = _ct->cur_youngergen_card_val(); 153 break; 154 } else { 155 assert(entry_val == _ct->cur_youngergen_card_val(), 156 "Should be the only possibility."); 157 // In this case, the card was clean before, and become 158 // cur_youngergen only because of processing of a promoted object. 159 // We don't have to look at the card. 160 return false; 161 } 162 } 163 return true; 164 } 165 166 167 inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) { 168 jbyte entry_val = *entry; 169 assert(entry_val != CardTableRS::clean_card_val(), 170 "We shouldn't be looking at clean cards, and this should " 171 "be the only place they get cleaned."); 172 assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card, 173 "This should be possible in the sequential case."); 174 *entry = CardTableRS::clean_card_val(); 175 return true; 176 } 177 178 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper( 179 DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) : 180 _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) { 181 } 182 183 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) { 184 return (((intptr_t)entry) & (BytesPerWord-1)) == 0; 185 } 186 187 // The regions are visited in *decreasing* address order. 188 // This order aids with imprecise card marking, where a dirty 189 // card may cause scanning, and summarization marking, of objects 190 // that extend onto subsequent cards. 191 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) { 192 assert(mr.word_size() > 0, "Error"); 193 assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned"); 194 // mr.end() may not necessarily be card aligned. 195 jbyte* cur_entry = _ct->byte_for(mr.last()); 196 const jbyte* limit = _ct->byte_for(mr.start()); 197 HeapWord* end_of_non_clean = mr.end(); 198 HeapWord* start_of_non_clean = end_of_non_clean; 199 while (cur_entry >= limit) { 200 HeapWord* cur_hw = _ct->addr_for(cur_entry); 201 if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) { 202 // Continue the dirty range by opening the 203 // dirty window one card to the left. 204 start_of_non_clean = cur_hw; 205 } else { 206 // We hit a "clean" card; process any non-empty 207 // "dirty" range accumulated so far. 208 if (start_of_non_clean < end_of_non_clean) { 209 const MemRegion mrd(start_of_non_clean, end_of_non_clean); 210 _dirty_card_closure->do_MemRegion(mrd); 211 } 212 213 // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary 214 if (is_word_aligned(cur_entry)) { 215 jbyte* cur_row = cur_entry - BytesPerWord; 216 while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) { 217 cur_row -= BytesPerWord; 218 } 219 cur_entry = cur_row + BytesPerWord; 220 cur_hw = _ct->addr_for(cur_entry); 221 } 222 223 // Reset the dirty window, while continuing to look 224 // for the next dirty card that will start a 225 // new dirty window. 226 end_of_non_clean = cur_hw; 227 start_of_non_clean = cur_hw; 228 } 229 // Note that "cur_entry" leads "start_of_non_clean" in 230 // its leftward excursion after this point 231 // in the loop and, when we hit the left end of "mr", 232 // will point off of the left end of the card-table 233 // for "mr". 234 cur_entry--; 235 } 236 // If the first card of "mr" was dirty, we will have 237 // been left with a dirty window, co-initial with "mr", 238 // which we now process. 239 if (start_of_non_clean < end_of_non_clean) { 240 const MemRegion mrd(start_of_non_clean, end_of_non_clean); 241 _dirty_card_closure->do_MemRegion(mrd); 242 } 243 } 244 245 // clean (by dirty->clean before) ==> cur_younger_gen 246 // dirty ==> cur_youngergen_and_prev_nonclean_card 247 // precleaned ==> cur_youngergen_and_prev_nonclean_card 248 // prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card 249 // cur-younger-gen ==> cur_younger_gen 250 // cur_youngergen_and_prev_nonclean_card ==> no change. 251 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) { 252 volatile jbyte* entry = byte_for(field); 253 do { 254 jbyte entry_val = *entry; 255 // We put this first because it's probably the most common case. 256 if (entry_val == clean_card_val()) { 257 // No threat of contention with cleaning threads. 258 *entry = cur_youngergen_card_val(); 259 return; 260 } else if (card_is_dirty_wrt_gen_iter(entry_val) 261 || is_prev_youngergen_card_val(entry_val)) { 262 // Mark it as both cur and prev youngergen; card cleaning thread will 263 // eventually remove the previous stuff. 264 jbyte new_val = cur_youngergen_and_prev_nonclean_card; 265 jbyte res = Atomic::cmpxchg(new_val, entry, entry_val); 266 // Did the CAS succeed? 267 if (res == entry_val) return; 268 // Otherwise, retry, to see the new value. 269 continue; 270 } else { 271 assert(entry_val == cur_youngergen_and_prev_nonclean_card 272 || entry_val == cur_youngergen_card_val(), 273 "should be only possibilities."); 274 return; 275 } 276 } while (true); 277 } 278 279 void CardTableRS::younger_refs_in_space_iterate(Space* sp, 280 OopsInGenClosure* cl, 281 uint n_threads) { 282 const MemRegion urasm = sp->used_region_at_save_marks(); 283 #ifdef ASSERT 284 // Convert the assertion check to a warning if we are running 285 // CMS+ParNew until related bug is fixed. 286 MemRegion ur = sp->used_region(); 287 assert(ur.contains(urasm) || (UseConcMarkSweepGC), 288 "Did you forget to call save_marks()? " 289 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " 290 "[" PTR_FORMAT ", " PTR_FORMAT ")", 291 p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())); 292 // In the case of CMS+ParNew, issue a warning 293 if (!ur.contains(urasm)) { 294 assert(UseConcMarkSweepGC, "Tautology: see assert above"); 295 log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? " 296 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " 297 "[" PTR_FORMAT ", " PTR_FORMAT ")", 298 p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())); 299 MemRegion ur2 = sp->used_region(); 300 MemRegion urasm2 = sp->used_region_at_save_marks(); 301 if (!ur.equals(ur2)) { 302 log_warning(gc)("CMS+ParNew: Flickering used_region()!!"); 303 } 304 if (!urasm.equals(urasm2)) { 305 log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!"); 306 } 307 ShouldNotReachHere(); 308 } 309 #endif 310 non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads); 311 } 312 313 void CardTableRS::clear_into_younger(Generation* old_gen) { 314 assert(GenCollectedHeap::heap()->is_old_gen(old_gen), 315 "Should only be called for the old generation"); 316 // The card tables for the youngest gen need never be cleared. 317 // There's a bit of subtlety in the clear() and invalidate() 318 // methods that we exploit here and in invalidate_or_clear() 319 // below to avoid missing cards at the fringes. If clear() or 320 // invalidate() are changed in the future, this code should 321 // be revisited. 20040107.ysr 322 clear(old_gen->prev_used_region()); 323 } 324 325 void CardTableRS::invalidate_or_clear(Generation* old_gen) { 326 assert(GenCollectedHeap::heap()->is_old_gen(old_gen), 327 "Should only be called for the old generation"); 328 // Invalidate the cards for the currently occupied part of 329 // the old generation and clear the cards for the 330 // unoccupied part of the generation (if any, making use 331 // of that generation's prev_used_region to determine that 332 // region). No need to do anything for the youngest 333 // generation. Also see note#20040107.ysr above. 334 MemRegion used_mr = old_gen->used_region(); 335 MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr); 336 if (!to_be_cleared_mr.is_empty()) { 337 clear(to_be_cleared_mr); 338 } 339 invalidate(used_mr); 340 } 341 342 343 class VerifyCleanCardClosure: public OopClosure { 344 private: 345 HeapWord* _boundary; 346 HeapWord* _begin; 347 HeapWord* _end; 348 protected: 349 template <class T> void do_oop_work(T* p) { 350 HeapWord* jp = (HeapWord*)p; 351 assert(jp >= _begin && jp < _end, 352 "Error: jp " PTR_FORMAT " should be within " 353 "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")", 354 p2i(jp), p2i(_begin), p2i(_end)); 355 oop obj = RawAccess<>::oop_load(p); 356 guarantee(obj == NULL || (HeapWord*)obj >= _boundary, 357 "pointer " PTR_FORMAT " at " PTR_FORMAT " on " 358 "clean card crosses boundary" PTR_FORMAT, 359 p2i(obj), p2i(jp), p2i(_boundary)); 360 } 361 362 public: 363 VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) : 364 _boundary(b), _begin(begin), _end(end) { 365 assert(b <= begin, 366 "Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT, 367 p2i(b), p2i(begin)); 368 assert(begin <= end, 369 "Error: begin " PTR_FORMAT " should be strictly below end " PTR_FORMAT, 370 p2i(begin), p2i(end)); 371 } 372 373 virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); } 374 virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); } 375 }; 376 377 class VerifyCTSpaceClosure: public SpaceClosure { 378 private: 379 CardTableRS* _ct; 380 HeapWord* _boundary; 381 public: 382 VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) : 383 _ct(ct), _boundary(boundary) {} 384 virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); } 385 }; 386 387 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure { 388 CardTableRS* _ct; 389 public: 390 VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {} 391 void do_generation(Generation* gen) { 392 // Skip the youngest generation. 393 if (GenCollectedHeap::heap()->is_young_gen(gen)) { 394 return; 395 } 396 // Normally, we're interested in pointers to younger generations. 397 VerifyCTSpaceClosure blk(_ct, gen->reserved().start()); 398 gen->space_iterate(&blk, true); 399 } 400 }; 401 402 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) { 403 // We don't need to do young-gen spaces. 404 if (s->end() <= gen_boundary) return; 405 MemRegion used = s->used_region(); 406 407 jbyte* cur_entry = byte_for(used.start()); 408 jbyte* limit = byte_after(used.last()); 409 while (cur_entry < limit) { 410 if (*cur_entry == clean_card_val()) { 411 jbyte* first_dirty = cur_entry+1; 412 while (first_dirty < limit && 413 *first_dirty == clean_card_val()) { 414 first_dirty++; 415 } 416 // If the first object is a regular object, and it has a 417 // young-to-old field, that would mark the previous card. 418 HeapWord* boundary = addr_for(cur_entry); 419 HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty); 420 HeapWord* boundary_block = s->block_start(boundary); 421 HeapWord* begin = boundary; // Until proven otherwise. 422 HeapWord* start_block = boundary_block; // Until proven otherwise. 423 if (boundary_block < boundary) { 424 if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) { 425 oop boundary_obj = oop(boundary_block); 426 if (!boundary_obj->is_objArray() && 427 !boundary_obj->is_typeArray()) { 428 guarantee(cur_entry > byte_for(used.start()), 429 "else boundary would be boundary_block"); 430 if (*byte_for(boundary_block) != clean_card_val()) { 431 begin = boundary_block + s->block_size(boundary_block); 432 start_block = begin; 433 } 434 } 435 } 436 } 437 // Now traverse objects until end. 438 if (begin < end) { 439 MemRegion mr(begin, end); 440 VerifyCleanCardClosure verify_blk(gen_boundary, begin, end); 441 for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) { 442 if (s->block_is_obj(cur) && s->obj_is_alive(cur)) { 443 oop(cur)->oop_iterate_no_header(&verify_blk, mr); 444 } 445 } 446 } 447 cur_entry = first_dirty; 448 } else { 449 // We'd normally expect that cur_youngergen_and_prev_nonclean_card 450 // is a transient value, that cannot be in the card table 451 // except during GC, and thus assert that: 452 // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card, 453 // "Illegal CT value"); 454 // That however, need not hold, as will become clear in the 455 // following... 456 457 // We'd normally expect that if we are in the parallel case, 458 // we can't have left a prev value (which would be different 459 // from the current value) in the card table, and so we'd like to 460 // assert that: 461 // guarantee(cur_youngergen_card_val() == youngergen_card 462 // || !is_prev_youngergen_card_val(*cur_entry), 463 // "Illegal CT value"); 464 // That, however, may not hold occasionally, because of 465 // CMS or MSC in the old gen. To wit, consider the 466 // following two simple illustrative scenarios: 467 // (a) CMS: Consider the case where a large object L 468 // spanning several cards is allocated in the old 469 // gen, and has a young gen reference stored in it, dirtying 470 // some interior cards. A young collection scans the card, 471 // finds a young ref and installs a youngergenP_n value. 472 // L then goes dead. Now a CMS collection starts, 473 // finds L dead and sweeps it up. Assume that L is 474 // abutting _unallocated_blk, so _unallocated_blk is 475 // adjusted down to (below) L. Assume further that 476 // no young collection intervenes during this CMS cycle. 477 // The next young gen cycle will not get to look at this 478 // youngergenP_n card since it lies in the unoccupied 479 // part of the space. 480 // Some young collections later the blocks on this 481 // card can be re-allocated either due to direct allocation 482 // or due to absorbing promotions. At this time, the 483 // before-gc verification will fail the above assert. 484 // (b) MSC: In this case, an object L with a young reference 485 // is on a card that (therefore) holds a youngergen_n value. 486 // Suppose also that L lies towards the end of the used 487 // the used space before GC. An MSC collection 488 // occurs that compacts to such an extent that this 489 // card is no longer in the occupied part of the space. 490 // Since current code in MSC does not always clear cards 491 // in the unused part of old gen, this stale youngergen_n 492 // value is left behind and can later be covered by 493 // an object when promotion or direct allocation 494 // re-allocates that part of the heap. 495 // 496 // Fortunately, the presence of such stale card values is 497 // "only" a minor annoyance in that subsequent young collections 498 // might needlessly scan such cards, but would still never corrupt 499 // the heap as a result. However, it's likely not to be a significant 500 // performance inhibitor in practice. For instance, 501 // some recent measurements with unoccupied cards eagerly cleared 502 // out to maintain this invariant, showed next to no 503 // change in young collection times; of course one can construct 504 // degenerate examples where the cost can be significant.) 505 // Note, in particular, that if the "stale" card is modified 506 // after re-allocation, it would be dirty, not "stale". Thus, 507 // we can never have a younger ref in such a card and it is 508 // safe not to scan that card in any collection. [As we see 509 // below, we do some unnecessary scanning 510 // in some cases in the current parallel scanning algorithm.] 511 // 512 // The main point below is that the parallel card scanning code 513 // deals correctly with these stale card values. There are two main 514 // cases to consider where we have a stale "young gen" value and a 515 // "derivative" case to consider, where we have a stale 516 // "cur_younger_gen_and_prev_non_clean" value, as will become 517 // apparent in the case analysis below. 518 // o Case 1. If the stale value corresponds to a younger_gen_n 519 // value other than the cur_younger_gen value then the code 520 // treats this as being tantamount to a prev_younger_gen 521 // card. This means that the card may be unnecessarily scanned. 522 // There are two sub-cases to consider: 523 // o Case 1a. Let us say that the card is in the occupied part 524 // of the generation at the time the collection begins. In 525 // that case the card will be either cleared when it is scanned 526 // for young pointers, or will be set to cur_younger_gen as a 527 // result of promotion. (We have elided the normal case where 528 // the scanning thread and the promoting thread interleave 529 // possibly resulting in a transient 530 // cur_younger_gen_and_prev_non_clean value before settling 531 // to cur_younger_gen. [End Case 1a.] 532 // o Case 1b. Consider now the case when the card is in the unoccupied 533 // part of the space which becomes occupied because of promotions 534 // into it during the current young GC. In this case the card 535 // will never be scanned for young references. The current 536 // code will set the card value to either 537 // cur_younger_gen_and_prev_non_clean or leave 538 // it with its stale value -- because the promotions didn't 539 // result in any younger refs on that card. Of these two 540 // cases, the latter will be covered in Case 1a during 541 // a subsequent scan. To deal with the former case, we need 542 // to further consider how we deal with a stale value of 543 // cur_younger_gen_and_prev_non_clean in our case analysis 544 // below. This we do in Case 3 below. [End Case 1b] 545 // [End Case 1] 546 // o Case 2. If the stale value corresponds to cur_younger_gen being 547 // a value not necessarily written by a current promotion, the 548 // card will not be scanned by the younger refs scanning code. 549 // (This is OK since as we argued above such cards cannot contain 550 // any younger refs.) The result is that this value will be 551 // treated as a prev_younger_gen value in a subsequent collection, 552 // which is addressed in Case 1 above. [End Case 2] 553 // o Case 3. We here consider the "derivative" case from Case 1b. above 554 // because of which we may find a stale 555 // cur_younger_gen_and_prev_non_clean card value in the table. 556 // Once again, as in Case 1, we consider two subcases, depending 557 // on whether the card lies in the occupied or unoccupied part 558 // of the space at the start of the young collection. 559 // o Case 3a. Let us say the card is in the occupied part of 560 // the old gen at the start of the young collection. In that 561 // case, the card will be scanned by the younger refs scanning 562 // code which will set it to cur_younger_gen. In a subsequent 563 // scan, the card will be considered again and get its final 564 // correct value. [End Case 3a] 565 // o Case 3b. Now consider the case where the card is in the 566 // unoccupied part of the old gen, and is occupied as a result 567 // of promotions during thus young gc. In that case, 568 // the card will not be scanned for younger refs. The presence 569 // of newly promoted objects on the card will then result in 570 // its keeping the value cur_younger_gen_and_prev_non_clean 571 // value, which we have dealt with in Case 3 here. [End Case 3b] 572 // [End Case 3] 573 // 574 // (Please refer to the code in the helper class 575 // ClearNonCleanCardWrapper and in CardTable for details.) 576 // 577 // The informal arguments above can be tightened into a formal 578 // correctness proof and it behooves us to write up such a proof, 579 // or to use model checking to prove that there are no lingering 580 // concerns. 581 // 582 // Clearly because of Case 3b one cannot bound the time for 583 // which a card will retain what we have called a "stale" value. 584 // However, one can obtain a Loose upper bound on the redundant 585 // work as a result of such stale values. Note first that any 586 // time a stale card lies in the occupied part of the space at 587 // the start of the collection, it is scanned by younger refs 588 // code and we can define a rank function on card values that 589 // declines when this is so. Note also that when a card does not 590 // lie in the occupied part of the space at the beginning of a 591 // young collection, its rank can either decline or stay unchanged. 592 // In this case, no extra work is done in terms of redundant 593 // younger refs scanning of that card. 594 // Then, the case analysis above reveals that, in the worst case, 595 // any such stale card will be scanned unnecessarily at most twice. 596 // 597 // It is nonetheless advisable to try and get rid of some of this 598 // redundant work in a subsequent (low priority) re-design of 599 // the card-scanning code, if only to simplify the underlying 600 // state machine analysis/proof. ysr 1/28/2002. XXX 601 cur_entry++; 602 } 603 } 604 } 605 606 void CardTableRS::verify() { 607 // At present, we only know how to verify the card table RS for 608 // generational heaps. 609 VerifyCTGenClosure blk(this); 610 GenCollectedHeap::heap()->generation_iterate(&blk, false); 611 CardTable::verify(); 612 } 613 614 CardTableRS::CardTableRS(MemRegion whole_heap) : 615 CardTable(whole_heap, /* scanned concurrently */ UseConcMarkSweepGC && CMSPrecleaningEnabled), 616 _cur_youngergen_card_val(youngergenP1_card), 617 // LNC functionality 618 _lowest_non_clean(NULL), 619 _lowest_non_clean_chunk_size(NULL), 620 _lowest_non_clean_base_chunk_index(NULL), 621 _last_LNC_resizing_collection(NULL) 622 { 623 // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations() 624 // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet. 625 uint max_gens = 2; 626 _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1, 627 mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL); 628 if (_last_cur_val_in_gen == NULL) { 629 vm_exit_during_initialization("Could not create last_cur_val_in_gen array."); 630 } 631 for (uint i = 0; i < max_gens + 1; i++) { 632 _last_cur_val_in_gen[i] = clean_card_val(); 633 } 634 } 635 636 CardTableRS::~CardTableRS() { 637 if (_last_cur_val_in_gen) { 638 FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen); 639 _last_cur_val_in_gen = NULL; 640 } 641 if (_lowest_non_clean) { 642 FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean); 643 _lowest_non_clean = NULL; 644 } 645 if (_lowest_non_clean_chunk_size) { 646 FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size); 647 _lowest_non_clean_chunk_size = NULL; 648 } 649 if (_lowest_non_clean_base_chunk_index) { 650 FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index); 651 _lowest_non_clean_base_chunk_index = NULL; 652 } 653 if (_last_LNC_resizing_collection) { 654 FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection); 655 _last_LNC_resizing_collection = NULL; 656 } 657 } 658 659 void CardTableRS::initialize() { 660 CardTable::initialize(); 661 _lowest_non_clean = 662 NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC); 663 _lowest_non_clean_chunk_size = 664 NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC); 665 _lowest_non_clean_base_chunk_index = 666 NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC); 667 _last_LNC_resizing_collection = 668 NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC); 669 if (_lowest_non_clean == NULL 670 || _lowest_non_clean_chunk_size == NULL 671 || _lowest_non_clean_base_chunk_index == NULL 672 || _last_LNC_resizing_collection == NULL) 673 vm_exit_during_initialization("couldn't allocate an LNC array."); 674 for (int i = 0; i < _max_covered_regions; i++) { 675 _lowest_non_clean[i] = NULL; 676 _lowest_non_clean_chunk_size[i] = 0; 677 _last_LNC_resizing_collection[i] = -1; 678 } 679 } 680 681 bool CardTableRS::card_will_be_scanned(jbyte cv) { 682 return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv); 683 } 684 685 bool CardTableRS::card_may_have_been_dirty(jbyte cv) { 686 return 687 cv != clean_card && 688 (card_is_dirty_wrt_gen_iter(cv) || 689 CardTableRS::youngergen_may_have_been_dirty(cv)); 690 } 691 692 void CardTableRS::non_clean_card_iterate_possibly_parallel( 693 Space* sp, 694 MemRegion mr, 695 OopsInGenClosure* cl, 696 CardTableRS* ct, 697 uint n_threads) 698 { 699 if (!mr.is_empty()) { 700 if (n_threads > 0) { 701 #if INCLUDE_ALL_GCS 702 non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads); 703 #else // INCLUDE_ALL_GCS 704 fatal("Parallel gc not supported here."); 705 #endif // INCLUDE_ALL_GCS 706 } else { 707 // clear_cl finds contiguous dirty ranges of cards to process and clear. 708 709 // This is the single-threaded version used by DefNew. 710 const bool parallel = false; 711 712 DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel); 713 ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel); 714 715 clear_cl.do_MemRegion(mr); 716 } 717 } 718 } 719 720 bool CardTableRS::is_in_young(oop obj) const { 721 return GenCollectedHeap::heap()->is_in_young(obj); 722 }