1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/shared/cardTableRS.hpp" 27 #include "gc/shared/genCollectedHeap.hpp" 28 #include "gc/shared/generation.hpp" 29 #include "gc/shared/space.inline.hpp" 30 #include "memory/allocation.inline.hpp" 31 #include "oops/oop.inline.hpp" 32 #include "runtime/atomic.hpp" 33 #include "runtime/java.hpp" 34 #include "runtime/os.hpp" 35 #include "utilities/macros.hpp" 36 37 class HasAccumulatedModifiedOopsClosure : public CLDClosure { 38 bool _found; 39 public: 40 HasAccumulatedModifiedOopsClosure() : _found(false) {} 41 void do_cld(ClassLoaderData* cld) { 42 if (_found) { 43 return; 44 } 45 46 if (cld->has_accumulated_modified_oops()) { 47 _found = true; 48 } 49 } 50 bool found() { 51 return _found; 52 } 53 }; 54 55 bool CLDRemSet::mod_union_is_clear() { 56 HasAccumulatedModifiedOopsClosure closure; 57 ClassLoaderDataGraph::cld_do(&closure); 58 59 return !closure.found(); 60 } 61 62 63 class ClearCLDModUnionClosure : public CLDClosure { 64 public: 65 void do_cld(ClassLoaderData* cld) { 66 if (cld->has_accumulated_modified_oops()) { 67 cld->clear_accumulated_modified_oops(); 68 } 69 } 70 }; 71 72 void CLDRemSet::clear_mod_union() { 73 ClearCLDModUnionClosure closure; 74 ClassLoaderDataGraph::cld_do(&closure); 75 } 76 77 78 jbyte CardTableRS::find_unused_youngergenP_card_value() { 79 for (jbyte v = youngergenP1_card; 80 v < cur_youngergen_and_prev_nonclean_card; 81 v++) { 82 bool seen = false; 83 for (int g = 0; g < _regions_to_iterate; g++) { 84 if (_last_cur_val_in_gen[g] == v) { 85 seen = true; 86 break; 87 } 88 } 89 if (!seen) { 90 return v; 91 } 92 } 93 ShouldNotReachHere(); 94 return 0; 95 } 96 97 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) { 98 // Parallel or sequential, we must always set the prev to equal the 99 // last one written. 100 if (parallel) { 101 // Find a parallel value to be used next. 102 jbyte next_val = find_unused_youngergenP_card_value(); 103 set_cur_youngergen_card_val(next_val); 104 105 } else { 106 // In an sequential traversal we will always write youngergen, so that 107 // the inline barrier is correct. 108 set_cur_youngergen_card_val(youngergen_card); 109 } 110 } 111 112 void CardTableRS::younger_refs_iterate(Generation* g, 113 OopsInGenClosure* blk, 114 uint n_threads) { 115 // The indexing in this array is slightly odd. We want to access 116 // the old generation record here, which is at index 2. 117 _last_cur_val_in_gen[2] = cur_youngergen_card_val(); 118 g->younger_refs_iterate(blk, n_threads); 119 } 120 121 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) { 122 if (_is_par) { 123 return clear_card_parallel(entry); 124 } else { 125 return clear_card_serial(entry); 126 } 127 } 128 129 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) { 130 while (true) { 131 // In the parallel case, we may have to do this several times. 132 jbyte entry_val = *entry; 133 assert(entry_val != CardTableRS::clean_card_val(), 134 "We shouldn't be looking at clean cards, and this should " 135 "be the only place they get cleaned."); 136 if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val) 137 || _ct->is_prev_youngergen_card_val(entry_val)) { 138 jbyte res = 139 Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val); 140 if (res == entry_val) { 141 break; 142 } else { 143 assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card, 144 "The CAS above should only fail if another thread did " 145 "a GC write barrier."); 146 } 147 } else if (entry_val == 148 CardTableRS::cur_youngergen_and_prev_nonclean_card) { 149 // Parallelism shouldn't matter in this case. Only the thread 150 // assigned to scan the card should change this value. 151 *entry = _ct->cur_youngergen_card_val(); 152 break; 153 } else { 154 assert(entry_val == _ct->cur_youngergen_card_val(), 155 "Should be the only possibility."); 156 // In this case, the card was clean before, and become 157 // cur_youngergen only because of processing of a promoted object. 158 // We don't have to look at the card. 159 return false; 160 } 161 } 162 return true; 163 } 164 165 166 inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) { 167 jbyte entry_val = *entry; 168 assert(entry_val != CardTableRS::clean_card_val(), 169 "We shouldn't be looking at clean cards, and this should " 170 "be the only place they get cleaned."); 171 assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card, 172 "This should be possible in the sequential case."); 173 *entry = CardTableRS::clean_card_val(); 174 return true; 175 } 176 177 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper( 178 DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) : 179 _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) { 180 } 181 182 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) { 183 return (((intptr_t)entry) & (BytesPerWord-1)) == 0; 184 } 185 186 // The regions are visited in *decreasing* address order. 187 // This order aids with imprecise card marking, where a dirty 188 // card may cause scanning, and summarization marking, of objects 189 // that extend onto subsequent cards. 190 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) { 191 assert(mr.word_size() > 0, "Error"); 192 assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned"); 193 // mr.end() may not necessarily be card aligned. 194 jbyte* cur_entry = _ct->byte_for(mr.last()); 195 const jbyte* limit = _ct->byte_for(mr.start()); 196 HeapWord* end_of_non_clean = mr.end(); 197 HeapWord* start_of_non_clean = end_of_non_clean; 198 while (cur_entry >= limit) { 199 HeapWord* cur_hw = _ct->addr_for(cur_entry); 200 if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) { 201 // Continue the dirty range by opening the 202 // dirty window one card to the left. 203 start_of_non_clean = cur_hw; 204 } else { 205 // We hit a "clean" card; process any non-empty 206 // "dirty" range accumulated so far. 207 if (start_of_non_clean < end_of_non_clean) { 208 const MemRegion mrd(start_of_non_clean, end_of_non_clean); 209 _dirty_card_closure->do_MemRegion(mrd); 210 } 211 212 // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary 213 if (is_word_aligned(cur_entry)) { 214 jbyte* cur_row = cur_entry - BytesPerWord; 215 while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) { 216 cur_row -= BytesPerWord; 217 } 218 cur_entry = cur_row + BytesPerWord; 219 cur_hw = _ct->addr_for(cur_entry); 220 } 221 222 // Reset the dirty window, while continuing to look 223 // for the next dirty card that will start a 224 // new dirty window. 225 end_of_non_clean = cur_hw; 226 start_of_non_clean = cur_hw; 227 } 228 // Note that "cur_entry" leads "start_of_non_clean" in 229 // its leftward excursion after this point 230 // in the loop and, when we hit the left end of "mr", 231 // will point off of the left end of the card-table 232 // for "mr". 233 cur_entry--; 234 } 235 // If the first card of "mr" was dirty, we will have 236 // been left with a dirty window, co-initial with "mr", 237 // which we now process. 238 if (start_of_non_clean < end_of_non_clean) { 239 const MemRegion mrd(start_of_non_clean, end_of_non_clean); 240 _dirty_card_closure->do_MemRegion(mrd); 241 } 242 } 243 244 // clean (by dirty->clean before) ==> cur_younger_gen 245 // dirty ==> cur_youngergen_and_prev_nonclean_card 246 // precleaned ==> cur_youngergen_and_prev_nonclean_card 247 // prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card 248 // cur-younger-gen ==> cur_younger_gen 249 // cur_youngergen_and_prev_nonclean_card ==> no change. 250 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) { 251 volatile jbyte* entry = byte_for(field); 252 do { 253 jbyte entry_val = *entry; 254 // We put this first because it's probably the most common case. 255 if (entry_val == clean_card_val()) { 256 // No threat of contention with cleaning threads. 257 *entry = cur_youngergen_card_val(); 258 return; 259 } else if (card_is_dirty_wrt_gen_iter(entry_val) 260 || is_prev_youngergen_card_val(entry_val)) { 261 // Mark it as both cur and prev youngergen; card cleaning thread will 262 // eventually remove the previous stuff. 263 jbyte new_val = cur_youngergen_and_prev_nonclean_card; 264 jbyte res = Atomic::cmpxchg(new_val, entry, entry_val); 265 // Did the CAS succeed? 266 if (res == entry_val) return; 267 // Otherwise, retry, to see the new value. 268 continue; 269 } else { 270 assert(entry_val == cur_youngergen_and_prev_nonclean_card 271 || entry_val == cur_youngergen_card_val(), 272 "should be only possibilities."); 273 return; 274 } 275 } while (true); 276 } 277 278 void CardTableRS::younger_refs_in_space_iterate(Space* sp, 279 OopsInGenClosure* cl, 280 uint n_threads) { 281 const MemRegion urasm = sp->used_region_at_save_marks(); 282 #ifdef ASSERT 283 // Convert the assertion check to a warning if we are running 284 // CMS+ParNew until related bug is fixed. 285 MemRegion ur = sp->used_region(); 286 assert(ur.contains(urasm) || (UseConcMarkSweepGC), 287 "Did you forget to call save_marks()? " 288 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " 289 "[" PTR_FORMAT ", " PTR_FORMAT ")", 290 p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())); 291 // In the case of CMS+ParNew, issue a warning 292 if (!ur.contains(urasm)) { 293 assert(UseConcMarkSweepGC, "Tautology: see assert above"); 294 log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? " 295 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " 296 "[" PTR_FORMAT ", " PTR_FORMAT ")", 297 p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())); 298 MemRegion ur2 = sp->used_region(); 299 MemRegion urasm2 = sp->used_region_at_save_marks(); 300 if (!ur.equals(ur2)) { 301 log_warning(gc)("CMS+ParNew: Flickering used_region()!!"); 302 } 303 if (!urasm.equals(urasm2)) { 304 log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!"); 305 } 306 ShouldNotReachHere(); 307 } 308 #endif 309 non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads); 310 } 311 312 void CardTableRS::clear_into_younger(Generation* old_gen) { 313 assert(GenCollectedHeap::heap()->is_old_gen(old_gen), 314 "Should only be called for the old generation"); 315 // The card tables for the youngest gen need never be cleared. 316 // There's a bit of subtlety in the clear() and invalidate() 317 // methods that we exploit here and in invalidate_or_clear() 318 // below to avoid missing cards at the fringes. If clear() or 319 // invalidate() are changed in the future, this code should 320 // be revisited. 20040107.ysr 321 clear(old_gen->prev_used_region()); 322 } 323 324 void CardTableRS::invalidate_or_clear(Generation* old_gen) { 325 assert(GenCollectedHeap::heap()->is_old_gen(old_gen), 326 "Should only be called for the old generation"); 327 // Invalidate the cards for the currently occupied part of 328 // the old generation and clear the cards for the 329 // unoccupied part of the generation (if any, making use 330 // of that generation's prev_used_region to determine that 331 // region). No need to do anything for the youngest 332 // generation. Also see note#20040107.ysr above. 333 MemRegion used_mr = old_gen->used_region(); 334 MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr); 335 if (!to_be_cleared_mr.is_empty()) { 336 clear(to_be_cleared_mr); 337 } 338 invalidate(used_mr); 339 } 340 341 342 class VerifyCleanCardClosure: public OopClosure { 343 private: 344 HeapWord* _boundary; 345 HeapWord* _begin; 346 HeapWord* _end; 347 protected: 348 template <class T> void do_oop_work(T* p) { 349 HeapWord* jp = (HeapWord*)p; 350 assert(jp >= _begin && jp < _end, 351 "Error: jp " PTR_FORMAT " should be within " 352 "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")", 353 p2i(jp), p2i(_begin), p2i(_end)); 354 oop obj = oopDesc::load_decode_heap_oop(p); 355 guarantee(obj == NULL || (HeapWord*)obj >= _boundary, 356 "pointer " PTR_FORMAT " at " PTR_FORMAT " on " 357 "clean card crosses boundary" PTR_FORMAT, 358 p2i(obj), p2i(jp), p2i(_boundary)); 359 } 360 361 public: 362 VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) : 363 _boundary(b), _begin(begin), _end(end) { 364 assert(b <= begin, 365 "Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT, 366 p2i(b), p2i(begin)); 367 assert(begin <= end, 368 "Error: begin " PTR_FORMAT " should be strictly below end " PTR_FORMAT, 369 p2i(begin), p2i(end)); 370 } 371 372 virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); } 373 virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); } 374 }; 375 376 class VerifyCTSpaceClosure: public SpaceClosure { 377 private: 378 CardTableRS* _ct; 379 HeapWord* _boundary; 380 public: 381 VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) : 382 _ct(ct), _boundary(boundary) {} 383 virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); } 384 }; 385 386 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure { 387 CardTableRS* _ct; 388 public: 389 VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {} 390 void do_generation(Generation* gen) { 391 // Skip the youngest generation. 392 if (GenCollectedHeap::heap()->is_young_gen(gen)) { 393 return; 394 } 395 // Normally, we're interested in pointers to younger generations. 396 VerifyCTSpaceClosure blk(_ct, gen->reserved().start()); 397 gen->space_iterate(&blk, true); 398 } 399 }; 400 401 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) { 402 // We don't need to do young-gen spaces. 403 if (s->end() <= gen_boundary) return; 404 MemRegion used = s->used_region(); 405 406 jbyte* cur_entry = byte_for(used.start()); 407 jbyte* limit = byte_after(used.last()); 408 while (cur_entry < limit) { 409 if (*cur_entry == clean_card_val()) { 410 jbyte* first_dirty = cur_entry+1; 411 while (first_dirty < limit && 412 *first_dirty == clean_card_val()) { 413 first_dirty++; 414 } 415 // If the first object is a regular object, and it has a 416 // young-to-old field, that would mark the previous card. 417 HeapWord* boundary = addr_for(cur_entry); 418 HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty); 419 HeapWord* boundary_block = s->block_start(boundary); 420 HeapWord* begin = boundary; // Until proven otherwise. 421 HeapWord* start_block = boundary_block; // Until proven otherwise. 422 if (boundary_block < boundary) { 423 if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) { 424 oop boundary_obj = oop(boundary_block); 425 if (!boundary_obj->is_objArray() && 426 !boundary_obj->is_typeArray()) { 427 guarantee(cur_entry > byte_for(used.start()), 428 "else boundary would be boundary_block"); 429 if (*byte_for(boundary_block) != clean_card_val()) { 430 begin = boundary_block + s->block_size(boundary_block); 431 start_block = begin; 432 } 433 } 434 } 435 } 436 // Now traverse objects until end. 437 if (begin < end) { 438 MemRegion mr(begin, end); 439 VerifyCleanCardClosure verify_blk(gen_boundary, begin, end); 440 for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) { 441 if (s->block_is_obj(cur) && s->obj_is_alive(cur)) { 442 oop(cur)->oop_iterate_no_header(&verify_blk, mr); 443 } 444 } 445 } 446 cur_entry = first_dirty; 447 } else { 448 // We'd normally expect that cur_youngergen_and_prev_nonclean_card 449 // is a transient value, that cannot be in the card table 450 // except during GC, and thus assert that: 451 // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card, 452 // "Illegal CT value"); 453 // That however, need not hold, as will become clear in the 454 // following... 455 456 // We'd normally expect that if we are in the parallel case, 457 // we can't have left a prev value (which would be different 458 // from the current value) in the card table, and so we'd like to 459 // assert that: 460 // guarantee(cur_youngergen_card_val() == youngergen_card 461 // || !is_prev_youngergen_card_val(*cur_entry), 462 // "Illegal CT value"); 463 // That, however, may not hold occasionally, because of 464 // CMS or MSC in the old gen. To wit, consider the 465 // following two simple illustrative scenarios: 466 // (a) CMS: Consider the case where a large object L 467 // spanning several cards is allocated in the old 468 // gen, and has a young gen reference stored in it, dirtying 469 // some interior cards. A young collection scans the card, 470 // finds a young ref and installs a youngergenP_n value. 471 // L then goes dead. Now a CMS collection starts, 472 // finds L dead and sweeps it up. Assume that L is 473 // abutting _unallocated_blk, so _unallocated_blk is 474 // adjusted down to (below) L. Assume further that 475 // no young collection intervenes during this CMS cycle. 476 // The next young gen cycle will not get to look at this 477 // youngergenP_n card since it lies in the unoccupied 478 // part of the space. 479 // Some young collections later the blocks on this 480 // card can be re-allocated either due to direct allocation 481 // or due to absorbing promotions. At this time, the 482 // before-gc verification will fail the above assert. 483 // (b) MSC: In this case, an object L with a young reference 484 // is on a card that (therefore) holds a youngergen_n value. 485 // Suppose also that L lies towards the end of the used 486 // the used space before GC. An MSC collection 487 // occurs that compacts to such an extent that this 488 // card is no longer in the occupied part of the space. 489 // Since current code in MSC does not always clear cards 490 // in the unused part of old gen, this stale youngergen_n 491 // value is left behind and can later be covered by 492 // an object when promotion or direct allocation 493 // re-allocates that part of the heap. 494 // 495 // Fortunately, the presence of such stale card values is 496 // "only" a minor annoyance in that subsequent young collections 497 // might needlessly scan such cards, but would still never corrupt 498 // the heap as a result. However, it's likely not to be a significant 499 // performance inhibitor in practice. For instance, 500 // some recent measurements with unoccupied cards eagerly cleared 501 // out to maintain this invariant, showed next to no 502 // change in young collection times; of course one can construct 503 // degenerate examples where the cost can be significant.) 504 // Note, in particular, that if the "stale" card is modified 505 // after re-allocation, it would be dirty, not "stale". Thus, 506 // we can never have a younger ref in such a card and it is 507 // safe not to scan that card in any collection. [As we see 508 // below, we do some unnecessary scanning 509 // in some cases in the current parallel scanning algorithm.] 510 // 511 // The main point below is that the parallel card scanning code 512 // deals correctly with these stale card values. There are two main 513 // cases to consider where we have a stale "young gen" value and a 514 // "derivative" case to consider, where we have a stale 515 // "cur_younger_gen_and_prev_non_clean" value, as will become 516 // apparent in the case analysis below. 517 // o Case 1. If the stale value corresponds to a younger_gen_n 518 // value other than the cur_younger_gen value then the code 519 // treats this as being tantamount to a prev_younger_gen 520 // card. This means that the card may be unnecessarily scanned. 521 // There are two sub-cases to consider: 522 // o Case 1a. Let us say that the card is in the occupied part 523 // of the generation at the time the collection begins. In 524 // that case the card will be either cleared when it is scanned 525 // for young pointers, or will be set to cur_younger_gen as a 526 // result of promotion. (We have elided the normal case where 527 // the scanning thread and the promoting thread interleave 528 // possibly resulting in a transient 529 // cur_younger_gen_and_prev_non_clean value before settling 530 // to cur_younger_gen. [End Case 1a.] 531 // o Case 1b. Consider now the case when the card is in the unoccupied 532 // part of the space which becomes occupied because of promotions 533 // into it during the current young GC. In this case the card 534 // will never be scanned for young references. The current 535 // code will set the card value to either 536 // cur_younger_gen_and_prev_non_clean or leave 537 // it with its stale value -- because the promotions didn't 538 // result in any younger refs on that card. Of these two 539 // cases, the latter will be covered in Case 1a during 540 // a subsequent scan. To deal with the former case, we need 541 // to further consider how we deal with a stale value of 542 // cur_younger_gen_and_prev_non_clean in our case analysis 543 // below. This we do in Case 3 below. [End Case 1b] 544 // [End Case 1] 545 // o Case 2. If the stale value corresponds to cur_younger_gen being 546 // a value not necessarily written by a current promotion, the 547 // card will not be scanned by the younger refs scanning code. 548 // (This is OK since as we argued above such cards cannot contain 549 // any younger refs.) The result is that this value will be 550 // treated as a prev_younger_gen value in a subsequent collection, 551 // which is addressed in Case 1 above. [End Case 2] 552 // o Case 3. We here consider the "derivative" case from Case 1b. above 553 // because of which we may find a stale 554 // cur_younger_gen_and_prev_non_clean card value in the table. 555 // Once again, as in Case 1, we consider two subcases, depending 556 // on whether the card lies in the occupied or unoccupied part 557 // of the space at the start of the young collection. 558 // o Case 3a. Let us say the card is in the occupied part of 559 // the old gen at the start of the young collection. In that 560 // case, the card will be scanned by the younger refs scanning 561 // code which will set it to cur_younger_gen. In a subsequent 562 // scan, the card will be considered again and get its final 563 // correct value. [End Case 3a] 564 // o Case 3b. Now consider the case where the card is in the 565 // unoccupied part of the old gen, and is occupied as a result 566 // of promotions during thus young gc. In that case, 567 // the card will not be scanned for younger refs. The presence 568 // of newly promoted objects on the card will then result in 569 // its keeping the value cur_younger_gen_and_prev_non_clean 570 // value, which we have dealt with in Case 3 here. [End Case 3b] 571 // [End Case 3] 572 // 573 // (Please refer to the code in the helper class 574 // ClearNonCleanCardWrapper and in CardTableModRefBS for details.) 575 // 576 // The informal arguments above can be tightened into a formal 577 // correctness proof and it behooves us to write up such a proof, 578 // or to use model checking to prove that there are no lingering 579 // concerns. 580 // 581 // Clearly because of Case 3b one cannot bound the time for 582 // which a card will retain what we have called a "stale" value. 583 // However, one can obtain a Loose upper bound on the redundant 584 // work as a result of such stale values. Note first that any 585 // time a stale card lies in the occupied part of the space at 586 // the start of the collection, it is scanned by younger refs 587 // code and we can define a rank function on card values that 588 // declines when this is so. Note also that when a card does not 589 // lie in the occupied part of the space at the beginning of a 590 // young collection, its rank can either decline or stay unchanged. 591 // In this case, no extra work is done in terms of redundant 592 // younger refs scanning of that card. 593 // Then, the case analysis above reveals that, in the worst case, 594 // any such stale card will be scanned unnecessarily at most twice. 595 // 596 // It is nonetheless advisable to try and get rid of some of this 597 // redundant work in a subsequent (low priority) re-design of 598 // the card-scanning code, if only to simplify the underlying 599 // state machine analysis/proof. ysr 1/28/2002. XXX 600 cur_entry++; 601 } 602 } 603 } 604 605 void CardTableRS::verify() { 606 // At present, we only know how to verify the card table RS for 607 // generational heaps. 608 VerifyCTGenClosure blk(this); 609 GenCollectedHeap::heap()->generation_iterate(&blk, false); 610 CardTable::verify(); 611 } 612 613 CardTableRS::CardTableRS(MemRegion whole_heap) : 614 CardTable(whole_heap, /* scanned concurrently */ UseConcMarkSweepGC && CMSPrecleaningEnabled), 615 _cur_youngergen_card_val(youngergenP1_card), 616 // LNC functionality 617 _lowest_non_clean(NULL), 618 _lowest_non_clean_chunk_size(NULL), 619 _lowest_non_clean_base_chunk_index(NULL), 620 _last_LNC_resizing_collection(NULL) 621 { 622 // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations() 623 // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet. 624 uint max_gens = 2; 625 _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1, 626 mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL); 627 if (_last_cur_val_in_gen == NULL) { 628 vm_exit_during_initialization("Could not create last_cur_val_in_gen array."); 629 } 630 for (uint i = 0; i < max_gens + 1; i++) { 631 _last_cur_val_in_gen[i] = clean_card_val(); 632 } 633 } 634 635 CardTableRS::~CardTableRS() { 636 if (_last_cur_val_in_gen) { 637 FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen); 638 _last_cur_val_in_gen = NULL; 639 } 640 if (_lowest_non_clean) { 641 FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean); 642 _lowest_non_clean = NULL; 643 } 644 if (_lowest_non_clean_chunk_size) { 645 FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size); 646 _lowest_non_clean_chunk_size = NULL; 647 } 648 if (_lowest_non_clean_base_chunk_index) { 649 FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index); 650 _lowest_non_clean_base_chunk_index = NULL; 651 } 652 if (_last_LNC_resizing_collection) { 653 FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection); 654 _last_LNC_resizing_collection = NULL; 655 } 656 } 657 658 void CardTableRS::initialize() { 659 CardTable::initialize(); 660 _lowest_non_clean = 661 NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC); 662 _lowest_non_clean_chunk_size = 663 NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC); 664 _lowest_non_clean_base_chunk_index = 665 NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC); 666 _last_LNC_resizing_collection = 667 NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC); 668 if (_lowest_non_clean == NULL 669 || _lowest_non_clean_chunk_size == NULL 670 || _lowest_non_clean_base_chunk_index == NULL 671 || _last_LNC_resizing_collection == NULL) 672 vm_exit_during_initialization("couldn't allocate an LNC array."); 673 for (int i = 0; i < _max_covered_regions; i++) { 674 _lowest_non_clean[i] = NULL; 675 _lowest_non_clean_chunk_size[i] = 0; 676 _last_LNC_resizing_collection[i] = -1; 677 } 678 } 679 680 bool CardTableRS::card_will_be_scanned(jbyte cv) { 681 return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv); 682 } 683 684 bool CardTableRS::card_may_have_been_dirty(jbyte cv) { 685 return 686 cv != clean_card && 687 (card_is_dirty_wrt_gen_iter(cv) || 688 CardTableRS::youngergen_may_have_been_dirty(cv)); 689 } 690 691 void CardTableRS::non_clean_card_iterate_possibly_parallel( 692 Space* sp, 693 MemRegion mr, 694 OopsInGenClosure* cl, 695 CardTableRS* ct, 696 uint n_threads) 697 { 698 if (!mr.is_empty()) { 699 if (n_threads > 0) { 700 #if INCLUDE_ALL_GCS 701 non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads); 702 #else // INCLUDE_ALL_GCS 703 fatal("Parallel gc not supported here."); 704 #endif // INCLUDE_ALL_GCS 705 } else { 706 // clear_cl finds contiguous dirty ranges of cards to process and clear. 707 708 // This is the single-threaded version used by DefNew. 709 const bool parallel = false; 710 711 DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel); 712 ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel); 713 714 clear_cl.do_MemRegion(mr); 715 } 716 } 717 } 718 719 bool CardTableRS::is_in_young(oop obj) const { 720 return GenCollectedHeap::heap()->is_in_young(obj); 721 }